[openocd.git] / src / target / aarch64.c
/***************************************************************************
 *   Copyright (C) 2015 by David Ung                                       *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify *
 *   it under the terms of the GNU General Public License as published by *
 *   the Free Software Foundation; either version 2 of the License, or    *
 *   (at your option) any later version.                                  *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,      *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of       *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        *
 *   GNU General Public License for more details.                         *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License    *
 *   along with this program; if not, write to the                        *
 *   Free Software Foundation, Inc.                                       *
 *                                                                         *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "breakpoints.h"
#include "aarch64.h"
#include "register.h"
#include "target_request.h"
#include "target_type.h"
#include "armv8_opcodes.h"
#include "armv8_cache.h"
#include "arm_semihosting.h"
#include "jtag/interface.h"
#include "smp.h"
#include <helper/time_support.h>

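/*
 * Halt and restart requests are posted through the CTI. In the SYNC modes
 * we poll PRSR until the PE confirms the state change; in the LAZY modes
 * the request is only posted and completion is checked later, e.g. when
 * the whole SMP group is polled in one go. This is a description of how
 * the modes are used below, not an architectural term.
 */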
enum restart_mode {
	RESTART_LAZY,
	RESTART_SYNC,
};

enum halt_mode {
	HALT_LAZY,
	HALT_SYNC,
};

struct aarch64_private_config {
	struct adiv5_private_config adiv5_config;
	struct arm_cti *cti;
};
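
/*
 * Each aarch64 target needs a CTI to drive halt/restart events. A minimal
 * configuration sketch; the DAP name, AP number and CTI base address below
 * are board-specific assumptions, not defaults:
 *
 *   cti create $_CHIPNAME.cti0 -dap $_CHIPNAME.dap -ap-num 0 \
 *       -baseaddr 0x80220000
 *   target create $_CHIPNAME.a53.0 aarch64 -dap $_CHIPNAME.dap \
 *       -cti $_CHIPNAME.cti0
 */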

static int aarch64_poll(struct target *target);
static int aarch64_debug_entry(struct target *target);
static int aarch64_restore_context(struct target *target, bool bpwp);
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_mmu(struct target *target, int *enabled);
static int aarch64_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int aarch64_read_cpu_memory(struct target *target,
	uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);

static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
		case ARM_MODE_SYS:
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			LOG_INFO("cannot write system control register in this mode");
			return ERROR_FAIL;
		}

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}

/* Modify system_control_reg to enable or disable the MMU for:
 * - virt2phys address conversion
 * - reading or writing memory at physical or virtual addresses */
static int aarch64_mmu_modify(struct target *target, int enable)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval = ERROR_OK;
	uint32_t instr = 0;

	if (enable) {
		/* The MMU can only be enabled here if it was enabled when the target stopped */
		if (!(aarch64->system_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
			return ERROR_FAIL;
		}
		if (!(aarch64->system_control_reg_curr & 0x1U))
			aarch64->system_control_reg_curr |= 0x1U;
	} else {
		if (aarch64->system_control_reg_curr & 0x4U) {
			/* data cache is active */
			aarch64->system_control_reg_curr &= ~0x4U;
			/* flush data cache armv8 function to be called */
			if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
				armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
		}
		if (aarch64->system_control_reg_curr & 0x1U)
			aarch64->system_control_reg_curr &= ~0x1U;
	}

	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_SYS:
		instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_DEBUG("unknown cpu state 0x%" PRIx32, armv8->arm.core_mode);
		break;
	}

	retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
			aarch64->system_control_reg_curr);
	return retval;
}

/*
 * Basic debug access; very low level, assumes the debug state is saved
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG("%s", target_name(target));

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = arm_cti_enable(armv8->cti, true);
	/* By default, gate all channel events to and from the CTM */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* output halt requests to PE on channel 0 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	return ERROR_OK;
}

/* Write to memory mapped registers directly with no cache or mmu handling */
static int aarch64_dap_write_memap_register_u32(struct target *target,
	uint32_t address,
	uint32_t value)
{
	int retval;
	struct armv8_common *armv8 = target_to_armv8(target);

	retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);

	return retval;
}

static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
{
	struct arm_dpm *dpm = &a8->armv8_common.dpm;
	int retval;

	dpm->arm = &a8->armv8_common.arm;
	dpm->didr = debug;

	retval = armv8_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = armv8_dpm_initialize(dpm);

	return retval;
}

static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
}

static int aarch64_check_state_one(struct target *target,
	uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	uint32_t prsr;
	int retval;

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
	if (retval != ERROR_OK)
		return retval;

	if (p_prsr)
		*p_prsr = prsr;

	if (p_result)
		*p_result = (prsr & mask) == (val & mask);

	return ERROR_OK;
}
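
/*
 * Typical use of the PRSR check above, e.g. to test whether the PE has
 * halted (entered debug state):
 *
 *   int halted;
 *   retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT,
 *           &halted, NULL);
 *
 * Both mask and value are passed so that cleared bits can be tested the
 * same way (pass val = 0 for bits that must be clear).
 */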

static int aarch64_wait_halt_one(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t prsr;

	int64_t then = timeval_ms();
	for (;;) {
		int halted;

		retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
		if (retval != ERROR_OK || halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
			break;
		}
	}
	return retval;
}

static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head = target->head;
	struct target *first = NULL;

	LOG_DEBUG("target %s exc %i", target_name(target), exc_target);

	while (head != NULL) {
		struct target *curr = head->target;
		struct armv8_common *armv8 = target_to_armv8(curr);
		head = head->next;

		if (exc_target && curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_RUNNING)
			continue;

		/* HACK: mark this target as prepared for halting */
		curr->debug_reason = DBG_REASON_DBGRQ;

		/* open the gate for channel 0 to let HALT requests pass to the CTM */
		retval = arm_cti_ungate_channel(armv8->cti, 0);
		if (retval == ERROR_OK)
			retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
		if (retval != ERROR_OK)
			break;

		LOG_DEBUG("target %s prepared", target_name(curr));

		if (first == NULL)
			first = curr;
	}

	if (p_first) {
		if (exc_target && first)
			*p_first = first;
		else
			*p_first = target;
	}

	return retval;
}

static int aarch64_halt_one(struct target *target, enum halt_mode mode)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG("%s", target_name(target));

	/* allow Halting Debug Mode */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = arm_cti_pulse_channel(armv8->cti, 0);
	if (retval != ERROR_OK)
		return retval;

	if (mode == HALT_SYNC) {
		retval = aarch64_wait_halt_one(target);
		if (retval != ERROR_OK) {
			if (retval == ERROR_TARGET_TIMEOUT)
				LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
			return retval;
		}
	}

	return ERROR_OK;
}

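/*
 * SMP halt works in two phases: aarch64_prepare_halt_smp() ungates CTI
 * channel 0 on every running PE of the group, then a single channel 0
 * pulse on one PE is distributed through the CTM and halts them all
 * (modulo the Hi6220 quirk handled in the polling loop below).
 */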
static int aarch64_halt_smp(struct target *target, bool exc_target)
{
	struct target *next = target;
	int retval;

	/* prepare halt on all PEs of the group */
	retval = aarch64_prepare_halt_smp(target, exc_target, &next);

	if (exc_target && next == target)
		return retval;

	/* halt the target PE */
	if (retval == ERROR_OK)
		retval = aarch64_halt_one(next, HALT_LAZY);

	if (retval != ERROR_OK)
		return retval;

	/* wait for all PEs to halt */
	int64_t then = timeval_ms();
	for (;;) {
		bool all_halted = true;
		struct target_list *head;
		struct target *curr;

		foreach_smp_target(head, target->head) {
			int halted;

			curr = head->target;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
			if (retval != ERROR_OK || !halted) {
				all_halted = false;
				break;
			}
		}

		if (all_halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}

		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTIs are not connected through a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit halt for the second cluster.
		 */
		retval = aarch64_halt_one(curr, HALT_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}

static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;

	if (debug_reason == DBG_REASON_NOTHALTED) {
		LOG_DEBUG("Halting remaining targets in SMP group");
		aarch64_halt_smp(target, true);
	}

	/* poll all targets in the group, but skip the target that serves GDB */
	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* remember the gdb_service->target */
		if (curr->gdb_service != NULL)
			gdb_target = curr->gdb_service->target;
		/* skip it */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in aarch64_poll() */
		curr->smp = 0;
		aarch64_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target != NULL && gdb_target != target)
		aarch64_poll(gdb_target);

	return ERROR_OK;
}

/*
 * AArch64 run control
 */

static int aarch64_poll(struct target *target)
{
	enum target_state prev_target_state;
	int retval = ERROR_OK;
	int halted;

	retval = aarch64_check_state_one(target,
				PRSR_HALT, PRSR_HALT, &halted, NULL);
	if (retval != ERROR_OK)
		return retval;

	if (halted) {
		prev_target_state = target->state;
		if (prev_target_state != TARGET_HALTED) {
			enum target_debug_reason debug_reason = target->debug_reason;

			/* We have a halting debug event */
			target->state = TARGET_HALTED;
			LOG_DEBUG("Target %s halted", target_name(target));
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp)
				update_halt_gdb(target, debug_reason);

			if (arm_semihosting(target, &retval) != 0)
				return retval;

			switch (prev_target_state) {
			case TARGET_RUNNING:
			case TARGET_UNKNOWN:
			case TARGET_RESET:
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
				break;
			case TARGET_DEBUG_RUNNING:
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
				break;
			default:
				break;
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}

static int aarch64_halt(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	armv8->last_run_control_op = ARMV8_RUNCONTROL_HALT;

	if (target->smp)
		return aarch64_halt_smp(target, false);

	return aarch64_halt_one(target, HALT_SYNC);
}

static int aarch64_restore_one(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	LOG_DEBUG("%s", target_name(target));

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups do not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = true;
	arm->pc->valid = true;

	/* call this now, before restoring context, because it uses cpu
	 * register r0 for restoring the system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}

/**
 * prepare single target for restart
 */
static int aarch64_prepare_restart_one(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dscr;
	uint32_t tmp;

	LOG_DEBUG("%s", target_name(target));

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* acknowledge a pending CTI halt event */
	retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs. Also close gate for channel 0
	 * to isolate the PE from halt events.
	 */
	if (retval == ERROR_OK)
		retval = arm_cti_ungate_channel(armv8->cti, 1);
	if (retval == ERROR_OK)
		retval = arm_cti_gate_channel(armv8->cti, 0);

	/* make sure that DSCR.HDE is set */
	if (retval == ERROR_OK) {
		dscr |= DSCR_HDE;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval == ERROR_OK) {
		/* clear sticky bits in PRSR, SDR is now 0 */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
	}

	return retval;
}

static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;

	LOG_DEBUG("%s", target_name(target));

	/* trigger an event on channel 1, generates a restart request to the PE */
	retval = arm_cti_pulse_channel(armv8->cti, 1);
	if (retval != ERROR_OK)
		return retval;

	if (mode == RESTART_SYNC) {
		int64_t then = timeval_ms();
		for (;;) {
			int resumed;
			/*
			 * if PRSR.SDR is set now, the target did restart, even
			 * if it's now already halted again (e.g. due to breakpoint)
			 */
			retval = aarch64_check_state_one(target,
						PRSR_SDR, PRSR_SDR, &resumed, NULL);
			if (retval != ERROR_OK || resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: Timeout waiting for resume", target_name(target));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	return ERROR_OK;
}

static int aarch64_restart_one(struct target *target, enum restart_mode mode)
{
	int retval;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prepare_restart_one(target);
	if (retval == ERROR_OK)
		retval = aarch64_do_restart_one(target, mode);

	return retval;
}

/*
 * prepare all but the current target for restart
 */
static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;
	uint64_t address;

	foreach_smp_target(head, target->head) {
		struct target *curr = head->target;

		/* skip calling target */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_HALTED)
			continue;

		/* resume at current address, not in step mode */
		retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
		if (retval == ERROR_OK)
			retval = aarch64_prepare_restart_one(curr);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to restore target %s", target_name(curr));
			break;
		}
		/* remember the first valid target in the group */
		if (first == NULL)
			first = curr;
	}

	if (p_first)
		*p_first = first;

	return retval;
}

static int aarch64_step_restart_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prep_restart_smp(target, 0, &first);
	if (retval != ERROR_OK)
		return retval;

	if (first != NULL)
		retval = aarch64_do_restart_one(first, RESTART_LAZY);
	if (retval != ERROR_OK) {
		LOG_DEBUG("error restarting target %s", target_name(first));
		return retval;
	}

	int64_t then = timeval_ms();
	for (;;) {
		struct target *curr = target;
		bool all_resumed = true;

		foreach_smp_target(head, target->head) {
			uint32_t prsr;
			int resumed;

			curr = head->target;

			if (curr == target)
				continue;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr,
						PRSR_SDR, PRSR_SDR, &resumed, &prsr);
			if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
				all_resumed = false;
				break;
			}

			if (curr->state != TARGET_RUNNING) {
				curr->state = TARGET_RUNNING;
				curr->debug_reason = DBG_REASON_NOTHALTED;
				target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
			}
		}

		if (all_resumed)
			break;

		if (timeval_ms() > then + 1000) {
			LOG_ERROR("%s: timeout waiting for target resume", __func__);
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTIs are not connected through a common
		 * trigger matrix. It seems that we need to resume one core in each
		 * cluster explicitly. So if we find that a core has not resumed
		 * yet, we trigger an explicit resume for the second cluster.
		 */
		retval = aarch64_do_restart_one(curr, RESTART_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}

static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	struct armv8_common *armv8 = target_to_armv8(target);
	armv8->last_run_control_op = ARMV8_RUNCONTROL_RESUME;

	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	/*
	 * If this target is part of an SMP group, prepare the other
	 * targets for resuming. This involves restoring the complete
	 * target register context and setting up CTI gates to accept
	 * resume events from the trigger matrix.
	 */
	if (target->smp) {
		retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
		if (retval != ERROR_OK)
			return retval;
	}

	/* all targets prepared, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
				 debug_execution);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_SYNC);
	if (retval != ERROR_OK)
		return retval;

	if (target->smp) {
		int64_t then = timeval_ms();
		for (;;) {
			struct target *curr = target;
			struct target_list *head;
			bool all_resumed = true;

			foreach_smp_target(head, target->head) {
				uint32_t prsr;
				int resumed;

				curr = head->target;
				if (curr == target)
					continue;
				if (!target_was_examined(curr))
					continue;

				retval = aarch64_check_state_one(curr,
							PRSR_SDR, PRSR_SDR, &resumed, &prsr);
				if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
					all_resumed = false;
					break;
				}

				if (curr->state != TARGET_RUNNING) {
					curr->state = TARGET_RUNNING;
					curr->debug_reason = DBG_REASON_NOTHALTED;
					target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
				}
			}

			if (all_resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}

			/*
			 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
			 * and it looks like the CTIs are not connected through a common
			 * trigger matrix. It seems that we need to resume one core in each
			 * cluster explicitly. So if we find that a core has not resumed
			 * yet, we trigger an explicit resume for the second cluster.
			 */
			retval = aarch64_do_restart_one(curr, RESTART_LAZY);
			if (retval != ERROR_OK)
				break;
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}

static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	uint32_t dscr;

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval == ERROR_OK)
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);

	dpm->dscr = dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* close the CTI gate for all events */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR1,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}

static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_SYS:
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_INFO("cannot read system control register in this mode");
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	/* cache the SCTLR state bits: M (MMU), C (D-cache), I (I-cache) */
	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
			(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
			(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	return ERROR_OK;
}

/*
 * single-step a target
 */
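/*
 * Stepping uses the external debug single-step mechanism: EDECR.SS
 * (bit 2) is set, the PE is restarted, executes one instruction and
 * re-enters debug state. While stepping, DSCR.INTdis (bits [23:22]) can
 * mask interrupts so that the step does not land in an ISR; see the
 * isrmasking_mode handling below.
 */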
static int aarch64_step(struct target *target, int current, target_addr_t address,
	int handle_breakpoints)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	int saved_retval = ERROR_OK;
	int retval;
	uint32_t edecr;

	armv8->last_run_control_op = ARMV8_RUNCONTROL_STEP;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
	/* make sure EDECR.SS is not set when restoring the register */

	if (retval == ERROR_OK) {
		edecr &= ~0x4;
		/* set EDECR.SS to enter hardware step mode */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
	}
	/* disable interrupts while stepping */
	if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
		retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
	/* bail out if stepping setup has failed */
	if (retval != ERROR_OK)
		return retval;

	if (target->smp && (current == 1)) {
		/*
		 * isolate current target so that it doesn't get resumed
		 * together with the others
		 */
		retval = arm_cti_gate_channel(armv8->cti, 1);
		/* resume all other targets in the group */
		if (retval == ERROR_OK)
			retval = aarch64_step_restart_smp(target);
		if (retval != ERROR_OK) {
			LOG_ERROR("Failed to restart non-stepping targets in SMP group");
			return retval;
		}
		LOG_DEBUG("Restarted all non-stepping targets in SMP group");
	}

	/* all other targets running, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &address, 0, 0);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_LAZY);

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
	if (!handle_breakpoints)
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	int64_t then = timeval_ms();
	for (;;) {
		int stepped;
		uint32_t prsr;

		retval = aarch64_check_state_one(target,
					PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
		if (retval != ERROR_OK || stepped)
			break;

		if (timeval_ms() > then + 100) {
			LOG_ERROR("timeout waiting for target %s halt after step",
					target_name(target));
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
	}

	/*
	 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
	 * causes a timeout. The core takes the step but doesn't complete it and so
	 * debug state is never entered. However, you can manually halt the core
	 * as an external debug event is also a WFI wakeup event.
	 */
	if (retval == ERROR_TARGET_TIMEOUT)
		saved_retval = aarch64_halt_one(target, HALT_SYNC);

	/* restore EDECR */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, edecr);
	if (retval != ERROR_OK)
		return retval;

	/* restore interrupts */
	if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
		retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
		if (retval != ERROR_OK)
			return retval;
	}

	if (saved_retval != ERROR_OK)
		return saved_retval;

	return ERROR_OK;
}

static int aarch64_restore_context(struct target *target, bool bpwp)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;

	int retval;

	LOG_DEBUG("%s", target_name(target));

	if (armv8->pre_restore_context)
		armv8->pre_restore_context(target);

	retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
	if (retval == ERROR_OK) {
		/* registers are now invalid */
		register_cache_invalidate(arm->core_cache);
		register_cache_invalidate(arm->core_cache->next);
	}

	return retval;
}

/*
 * AArch64 breakpoint and watchpoint functions
 */
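
/*
 * The DBGBCR<n>_EL1 control words assembled below use these fields:
 *   BT  [23:20] - breakpoint type (matchmode: 0x0 = address match,
 *                 0x1 = linked address, 0x2 = context ID,
 *                 0x3 = linked context ID)
 *   HMC [13]    - higher mode control, set to match at all ELs
 *   BAS [12:5]  - byte address select
 *   PMC [2:1]   - privilege mask, 0b11 = match EL1 and EL0
 *   E   [0]     - enable
 * Bit positions are as described for the ARMv8-A debug registers; see the
 * architecture reference manual for the authoritative encoding.
 */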

/* Setup hardware Breakpoint Register Pair */
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		int64_t bpt_value;
		while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
			brp_i++;
		if (brp_i >= aarch64->brp_num) {
			LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
		breakpoint->set = brp_i + 1;
		if (breakpoint->length == 2)
			byte_addr_select = (3 << (breakpoint->address & 0x02));
		control = ((matchmode & 0x7) << 20)
			| (1 << 13)
			| (byte_addr_select << 5)
			| (3 << 1) | 1;
		brp_list[brp_i].used = 1;
		brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
		brp_list[brp_i].control = control;
		bpt_value = brp_list[brp_i].value;

		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
				(uint32_t)(bpt_value & 0xFFFFFFFF));
		if (retval != ERROR_OK)
			return retval;
		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
				(uint32_t)(bpt_value >> 32));
		if (retval != ERROR_OK)
			return retval;

		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
				brp_list[brp_i].control);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
			brp_list[brp_i].control,
			brp_list[brp_i].value);

	} else if (breakpoint->type == BKPT_SOFT) {
		uint32_t opcode;
		uint8_t code[4];

		if (armv8_dpm_get_core_state(&armv8->dpm) == ARM_STATE_AARCH64) {
			opcode = ARMV8_HLT(11);

			if (breakpoint->length != 4)
				LOG_ERROR("bug: breakpoint length should be 4 in AArch64 mode");
		} else {
			/**
			 * core_state is ARM_STATE_ARM
			 * in that case the opcode depends on breakpoint length:
			 * - if length == 4 => A32 opcode
			 * - if length == 2 => T32 opcode
			 * - if length == 3 => T32 opcode (refer to gdb doc : ARM-Breakpoint-Kinds)
			 *   in that case the length should be changed from 3 to 4 bytes
			 **/
			opcode = (breakpoint->length == 4) ? ARMV8_HLT_A1(11) :
				(uint32_t) (ARMV8_HLT_T1(11) | ARMV8_HLT_T1(11) << 16);

			if (breakpoint->length == 3)
				breakpoint->length = 4;
		}

		buf_set_u32(code, 0, 32, opcode);

		retval = target_read_memory(target,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length, 1,
				breakpoint->orig_instr);
		if (retval != ERROR_OK)
			return retval;

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		retval = target_write_memory(target,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length, 1, code);
		if (retval != ERROR_OK)
			return retval;

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		breakpoint->set = 0x11;	/* Any nice value but 0 */
	}

	/* Ensure that halting debug mode is enabled */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Failed to set DSCR.HDE");
		return retval;
	}

	return ERROR_OK;
}

static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval = ERROR_FAIL;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs */
	while ((brp_i < aarch64->brp_num) &&
		(brp_list[brp_i].used || (brp_list[brp_i].type != BRP_CONTEXT)))
		brp_i++;

	if (brp_i >= aarch64->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint->set = brp_i + 1;
	control = ((matchmode & 0x7) << 20)
		| (1 << 13)
		| (byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_i].used = 1;
	brp_list[brp_i].value = (breakpoint->asid);
	brp_list[brp_i].control = control;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
			brp_list[brp_i].value);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
			brp_list[brp_i].control);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
		brp_list[brp_i].control,
		brp_list[brp_i].value);
	return ERROR_OK;
}

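/*
 * A hybrid breakpoint pairs two BRPs that are cross-linked through their
 * LBN fields: a context BRP matching the ASID (BT = 0x3, linked context
 * ID match) and an address BRP (BT = 0x1, linked address match), so the
 * address only hits in the selected context.
 */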
static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval = ERROR_FAIL;
	int brp_1 = 0;	/* holds the contextID pair */
	int brp_2 = 0;	/* holds the IVA pair */
	uint32_t control_CTX, control_IVA;
	uint8_t CTX_byte_addr_select = 0x0F;
	uint8_t IVA_byte_addr_select = 0x0F;
	uint8_t CTX_machmode = 0x03;
	uint8_t IVA_machmode = 0x01;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs */
	while ((brp_1 < aarch64->brp_num) &&
		(brp_list[brp_1].used || (brp_list[brp_1].type != BRP_CONTEXT)))
		brp_1++;

	LOG_DEBUG("brp(CTX) found num: %d", brp_1);
	if (brp_1 >= aarch64->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	while ((brp_2 < aarch64->brp_num) &&
		(brp_list[brp_2].used || (brp_list[brp_2].type != BRP_NORMAL)))
		brp_2++;

	LOG_DEBUG("brp(IVA) found num: %d", brp_2);
	if (brp_2 >= aarch64->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint->set = brp_1 + 1;
	breakpoint->linked_BRP = brp_2;
	control_CTX = ((CTX_machmode & 0x7) << 20)
		| (brp_2 << 16)
		| (0 << 14)
		| (CTX_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_1].used = 1;
	brp_list[brp_1].value = (breakpoint->asid);
	brp_list[brp_1].control = control_CTX;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
			brp_list[brp_1].value);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
			brp_list[brp_1].control);
	if (retval != ERROR_OK)
		return retval;

	control_IVA = ((IVA_machmode & 0x7) << 20)
		| (brp_1 << 16)
		| (1 << 13)
		| (IVA_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_2].used = 1;
	brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
	brp_list[brp_2].control = control_IVA;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].value & 0xFFFFFFFF);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].value >> 32);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].control);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}

static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)(brp_list[brp_i].value >> 32));
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)(brp_list[brp_j].value >> 32));
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)(brp_list[brp_i].value >> 32));
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}

static int aarch64_add_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_breakpoint(target, breakpoint, 0x00);	/* Exact match */
}

static int aarch64_add_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_context_breakpoint(target, breakpoint, 0x02);	/* asid match */
}

static int aarch64_add_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_hybrid_breakpoint(target, breakpoint);	/* ??? */
}

static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

#if 0
/* It is perfectly possible to remove breakpoints while the target is running */
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}
#endif

	if (breakpoint->set) {
		aarch64_unset_breakpoint(target, breakpoint);
		if (breakpoint->type == BKPT_HARD)
			aarch64->brp_num_available++;
	}

	return ERROR_OK;
}

/*
 * AArch64 reset functions
 */

static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		adapter_assert_reset();
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target)) {
		register_cache_invalidate(armv8->arm.core_cache);
		register_cache_invalidate(armv8->arm.core_cache->next);
	}

	target->state = TARGET_RESET;

	return ERROR_OK;
}

static int aarch64_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	adapter_deassert_reset();

	if (!target_was_examined(target))
		return ERROR_OK;

	retval = aarch64_poll(target);
	if (retval != ERROR_OK)
		return retval;

	retval = aarch64_init_debug_access(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
				target_name(target));
			retval = target_halt(target);
		}
	}

	return retval;
}

static int aarch64_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode if necessary */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t data, opcode;

		/* write the data to store into DTRRX */
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		if (arm->core_state == ARM_STATE_AARCH64)
			retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}

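/*
 * The fast path below relies on DCC memory access mode (DSCR.MA): once
 * enabled, every write to DTRRX makes the PE execute the equivalent of
 * "str w1, [x0], #4" internally, so a whole buffer can be streamed with
 * mem_ap_write_buf_noincr() without issuing one instruction per word.
 */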
static int aarch64_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;

	armv8_reg_current(arm, 1)->dirty = true;

	/* Step 1.d - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 2.a - Do the write */
	retval = mem_ap_write_buf_noincr(armv8->debug_ap,
			buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.a - Switch DTR mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}

1829 static int aarch64_write_cpu_memory(struct target *target,
1830 uint64_t address, uint32_t size,
1831 uint32_t count, const uint8_t *buffer)
1832 {
1833 /* write memory through APB-AP */
1834 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1835 struct armv8_common *armv8 = target_to_armv8(target);
1836 struct arm_dpm *dpm = &armv8->dpm;
1837 struct arm *arm = &armv8->arm;
1838 uint32_t dscr;
1839
1840 if (target->state != TARGET_HALTED) {
1841 LOG_WARNING("target not halted");
1842 return ERROR_TARGET_NOT_HALTED;
1843 }
1844
1845 /* Mark register X0 as dirty, as it will be used
1846 * for transferring the data.
1847 * It will be restored automatically when exiting
1848 * debug mode
1849 */
1850 armv8_reg_current(arm, 0)->dirty = true;
1851
1852 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1853
1854 /* Read DSCR */
1855 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1856 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1857 if (retval != ERROR_OK)
1858 return retval;
1859
1860 /* Set Normal access mode */
1861 dscr = (dscr & ~DSCR_MA);
1862 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1863 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1864 if (retval != ERROR_OK)
1865 return retval;
1866
1867 if (arm->core_state == ARM_STATE_AARCH64) {
1868 /* Write X0 with value 'address' using write procedure */
1869 /* Step 1.a+b - Write the address for the transfer into DBGDTR_EL0 */
1870 /* Step 1.c - Copy value from DTR to X0 using instruction mrs x0, DBGDTR_EL0 */
1871 retval = dpm->instr_write_data_dcc_64(dpm,
1872 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
1873 } else {
1874 /* Write R0 with value 'address' using write procedure */
1875 /* Step 1.a+b - Write the address for the transfer into DBGDTRRX */
1876 /* Step 1.c - Copy value from DTR to R0 using instruction mrc p14, 0, r0, c0, c5, 0 */
1877 retval = dpm->instr_write_data_dcc(dpm,
1878 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
1879 }
1880
1881 if (retval != ERROR_OK)
1882 return retval;
1883
1884 if (size == 4 && (address % 4) == 0)
1885 retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
1886 else
1887 retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);
1888
1889 if (retval != ERROR_OK) {
1890 /* Unset DTR mode */
1891 mem_ap_read_atomic_u32(armv8->debug_ap,
1892 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1893 dscr &= ~DSCR_MA;
1894 mem_ap_write_atomic_u32(armv8->debug_ap,
1895 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1896 }
1897
1898 /* Check for sticky abort flags in the DSCR */
1899 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1900 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1901 if (retval != ERROR_OK)
1902 return retval;
1903
1904 dpm->dscr = dscr;
1905 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1906 /* Abort occurred - clear it and exit */
1907 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1908 armv8_dpm_handle_exception(dpm, true);
1909 return ERROR_FAIL;
1910 }
1911
1912 /* Done */
1913 return ERROR_OK;
1914 }
1915
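/* Slow path for byte, halfword and unaligned reads: each element is
 * loaded into W1 with the matching LDRB/LDRH/LDR opcode, W1 is moved to
 * DTRTX, and the debugger pops the value over the APB. One instruction
 * round-trip per element, but it works for any size and alignment. */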
1916 static int aarch64_read_cpu_memory_slow(struct target *target,
1917 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
1918 {
1919 struct armv8_common *armv8 = target_to_armv8(target);
1920 struct arm_dpm *dpm = &armv8->dpm;
1921 struct arm *arm = &armv8->arm;
1922 int retval;
1923
1924 armv8_reg_current(arm, 1)->dirty = true;
1925
1926 /* change DCC to normal mode (if necessary) */
1927 if (*dscr & DSCR_MA) {
1928 *dscr &= ~DSCR_MA;
1929 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1930 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1931 if (retval != ERROR_OK)
1932 return retval;
1933 }
1934
1935 while (count) {
1936 uint32_t opcode, data;
1937
1938 if (size == 1)
1939 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
1940 else if (size == 2)
1941 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
1942 else
1943 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
1944 retval = dpm->instr_execute(dpm, opcode);
1945 if (retval != ERROR_OK)
1946 return retval;
1947
1948 if (arm->core_state == ARM_STATE_AARCH64)
1949 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
1950 else
1951 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1952 if (retval != ERROR_OK)
1953 return retval;
1954
1955 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1956 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
1957 if (retval != ERROR_OK)
1958 return retval;
1959
1960 if (size == 1)
1961 *buffer = (uint8_t)data;
1962 else if (size == 2)
1963 target_buffer_set_u16(target, buffer, (uint16_t)data);
1964 else
1965 target_buffer_set_u32(target, buffer, data);
1966
1967 /* Advance */
1968 buffer += size;
1969 --count;
1970 }
1971
1972 return ERROR_OK;
1973 }
1974
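/* Fast path for 32-bit aligned reads (DDI0487A.g J9.1): with DSCR.MA set,
 * every external read of DTRTX makes the PE execute an implied
 * "ldr w1, [x0], #4" and place the result in DTRTX. The pipeline is one
 * word deep, so a dummy access primes it and the final word is fetched
 * only after leaving memory access mode, to avoid reading past the end
 * of the requested region. */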
1975 static int aarch64_read_cpu_memory_fast(struct target *target,
1976 uint32_t count, uint8_t *buffer, uint32_t *dscr)
1977 {
1978 struct armv8_common *armv8 = target_to_armv8(target);
1979 struct arm_dpm *dpm = &armv8->dpm;
1980 struct arm *arm = &armv8->arm;
1981 int retval;
1982 uint32_t value;
1983
1984 /* Mark X1 as dirty */
1985 armv8_reg_current(arm, 1)->dirty = true;
1986
1987 if (arm->core_state == ARM_STATE_AARCH64) {
1988 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1989 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
1990 } else {
1991 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1992 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1993 }
1994
1995 if (retval != ERROR_OK)
1996 return retval;
1997
1998 /* Step 1.e - Change DCC to memory mode */
1999 *dscr |= DSCR_MA;
2000 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2001 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2002 if (retval != ERROR_OK)
2003 return retval;
2004
2005 /* Step 1.f - read DBGDTRTX and discard the value */
2006 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2007 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2008 if (retval != ERROR_OK)
2009 return retval;
2010
2011 count--;
2012 /* Read the data - each read of the DTRTX register causes the instruction to be reissued.
2013 * Abort flags are sticky, so they can be checked at the end of the transaction.
2014 *
2015 * The data is read in 32-bit aligned chunks.
2016 */
2017
2018 if (count) {
2019 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
2020 * increments X0 by 4. */
2021 retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
2022 armv8->debug_base + CPUV8_DBG_DTRTX);
2023 if (retval != ERROR_OK)
2024 return retval;
2025 }
2026
2027 /* Step 3.a - set DTR access mode back to Normal mode */
2028 *dscr &= ~DSCR_MA;
2029 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2030 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2031 if (retval != ERROR_OK)
2032 return retval;
2033
2034 /* Step 3.b - read DBGDTRTX for the final value */
2035 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2036 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2037 if (retval != ERROR_OK)
2038 return retval;
2039
2040 target_buffer_set_u32(target, buffer + count * 4, value);
2041 return retval;
2042 }
2043
2044 static int aarch64_read_cpu_memory(struct target *target,
2045 target_addr_t address, uint32_t size,
2046 uint32_t count, uint8_t *buffer)
2047 {
2048 /* read memory through APB-AP */
2049 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2050 struct armv8_common *armv8 = target_to_armv8(target);
2051 struct arm_dpm *dpm = &armv8->dpm;
2052 struct arm *arm = &armv8->arm;
2053 uint32_t dscr;
2054
2055 LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
2056 address, size, count);
2057
2058 if (target->state != TARGET_HALTED) {
2059 LOG_WARNING("target not halted");
2060 return ERROR_TARGET_NOT_HALTED;
2061 }
2062
2063 /* Mark register X0 as dirty, as it will be used
2064 * for transferring the data.
2065 * It will be restored automatically when exiting
2066 * debug mode
2067 */
2068 armv8_reg_current(arm, 0)->dirty = true;
2069
2070 /* Read DSCR */
2071 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2072 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2073 if (retval != ERROR_OK)
2074 return retval;
2075
2076 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2077
2078 /* Set Normal access mode */
2079 dscr &= ~DSCR_MA;
2080 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2081 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2082 if (retval != ERROR_OK)
2083 return retval;
2084
2085 if (arm->core_state == ARM_STATE_AARCH64) {
2086 /* Write X0 with value 'address' using write procedure */
2087 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2088 /* Step 1.c - Copy value from DTR to X0 using instruction mrs x0, DBGDTR_EL0 */
2089 retval = dpm->instr_write_data_dcc_64(dpm,
2090 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
2091 } else {
2092 /* Write R0 with value 'address' using write procedure */
2093 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2094 /* Step 1.c - Copy value from DTR to R0 using instruction mrc p14, 0, r0, c0, c5, 0 */
2095 retval = dpm->instr_write_data_dcc(dpm,
2096 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
2097 }
2098
2099 if (retval != ERROR_OK)
2100 return retval;
2101
2102 if (size == 4 && (address % 4) == 0)
2103 retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
2104 else
2105 retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2106
2107 if (dscr & DSCR_MA) {
2108 dscr &= ~DSCR_MA;
2109 mem_ap_write_atomic_u32(armv8->debug_ap,
2110 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2111 }
2112
2113 if (retval != ERROR_OK)
2114 return retval;
2115
2116 /* Check for sticky abort flags in the DSCR */
2117 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2118 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2119 if (retval != ERROR_OK)
2120 return retval;
2121
2122 dpm->dscr = dscr;
2123
2124 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2125 /* Abort occurred - clear it and exit */
2126 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2127 armv8_dpm_handle_exception(dpm, true);
2128 return ERROR_FAIL;
2129 }
2130
2131 /* Done */
2132 return ERROR_OK;
2133 }
2134
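/* Physical access wrapper: the MMU is switched off first, so the
 * addresses fed to the load/store opcodes above map 1:1 to physical
 * addresses. aarch64_read_memory() switches it back on for subsequent
 * virtual accesses. */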
2135 static int aarch64_read_phys_memory(struct target *target,
2136 target_addr_t address, uint32_t size,
2137 uint32_t count, uint8_t *buffer)
2138 {
2139 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2140
2141 if (count && buffer) {
2142 /* read memory through APB-AP */
2143 retval = aarch64_mmu_modify(target, 0);
2144 if (retval != ERROR_OK)
2145 return retval;
2146 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2147 }
2148 return retval;
2149 }
2150
2151 static int aarch64_read_memory(struct target *target, target_addr_t address,
2152 uint32_t size, uint32_t count, uint8_t *buffer)
2153 {
2154 int mmu_enabled = 0;
2155 int retval;
2156
2157 /* determine if MMU was enabled on target stop */
2158 retval = aarch64_mmu(target, &mmu_enabled);
2159 if (retval != ERROR_OK)
2160 return retval;
2161
2162 if (mmu_enabled) {
2163 /* enable MMU as we could have disabled it for phys access */
2164 retval = aarch64_mmu_modify(target, 1);
2165 if (retval != ERROR_OK)
2166 return retval;
2167 }
2168 return aarch64_read_cpu_memory(target, address, size, count, buffer);
2169 }
2170
2171 static int aarch64_write_phys_memory(struct target *target,
2172 target_addr_t address, uint32_t size,
2173 uint32_t count, const uint8_t *buffer)
2174 {
2175 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2176
2177 if (count && buffer) {
2178 /* write memory through APB-AP */
2179 retval = aarch64_mmu_modify(target, 0);
2180 if (retval != ERROR_OK)
2181 return retval;
2182 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2183 }
2184
2185 return retval;
2186 }
2187
2188 static int aarch64_write_memory(struct target *target, target_addr_t address,
2189 uint32_t size, uint32_t count, const uint8_t *buffer)
2190 {
2191 int mmu_enabled = 0;
2192 int retval;
2193
2194 /* determine if MMU was enabled on target stop */
2195 retval = aarch64_mmu(target, &mmu_enabled);
2196 if (retval != ERROR_OK)
2197 return retval;
2198
2199 if (mmu_enabled) {
2200 /* enable MMU as we could have disabled it for phys access */
2201 retval = aarch64_mmu_modify(target, 1);
2202 if (retval != ERROR_OK)
2203 return retval;
2204 }
2205 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2206 }
2207
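/* Periodic poll for debug messages from a running target: as long as
 * DSCR reports DTRTX full, each word read from DTRTX is a target_request
 * message that the target firmware pushed through the DCC. */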
2208 static int aarch64_handle_target_request(void *priv)
2209 {
2210 struct target *target = priv;
2211 struct armv8_common *armv8 = target_to_armv8(target);
2212 int retval;
2213
2214 if (!target_was_examined(target))
2215 return ERROR_OK;
2216 if (!target->dbg_msg_enabled)
2217 return ERROR_OK;
2218
2219 if (target->state == TARGET_RUNNING) {
2220 uint32_t request;
2221 uint32_t dscr;
2222 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2223 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2224
2225 /* check if we have data */
2226 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2227 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2228 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2229 if (retval == ERROR_OK) {
2230 target_request(target, request);
2231 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2232 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2233 }
2234 }
2235 }
2236
2237 return ERROR_OK;
2238 }
2239
2240 static int aarch64_examine_first(struct target *target)
2241 {
2242 struct aarch64_common *aarch64 = target_to_aarch64(target);
2243 struct armv8_common *armv8 = &aarch64->armv8_common;
2244 struct adiv5_dap *swjdp = armv8->arm.dap;
2245 struct aarch64_private_config *pc;
2246 int i;
2247 int retval = ERROR_OK;
2248 uint64_t debug, ttypr;
2249 uint32_t cpuid;
2250 uint32_t tmp0, tmp1, tmp2, tmp3;
2251 debug = ttypr = cpuid = 0;
2252
2253 /* Search for the APB-AP - it is needed for access to debug registers */
2254 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2255 if (retval != ERROR_OK) {
2256 LOG_ERROR("Could not find APB-AP for debug access");
2257 return retval;
2258 }
2259
2260 retval = mem_ap_init(armv8->debug_ap);
2261 if (retval != ERROR_OK) {
2262 LOG_ERROR("Could not initialize the APB-AP");
2263 return retval;
2264 }
2265
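/* The core debug registers sit behind the comparatively slow APB bus;
 * pad each AP memory access with extra TCK idle cycles so back-to-back
 * accesses have time to complete. The fixed value below is a heuristic. */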
2266 armv8->debug_ap->memaccess_tck = 10;
2267
2268 if (!target->dbgbase_set) {
2269 uint32_t dbgbase;
2270 /* Get ROM Table base */
2271 uint32_t apid;
2272 int32_t coreidx = target->coreid;
2273 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2274 if (retval != ERROR_OK)
2275 return retval;
2276 /* Lookup 0x15 -- Processor DAP */
2277 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2278 &armv8->debug_base, &coreidx);
2279 if (retval != ERROR_OK)
2280 return retval;
2281 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
2282 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2283 } else
2284 armv8->debug_base = target->dbgbase;
2285
2286 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2287 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2288 if (retval != ERROR_OK) {
2289 LOG_DEBUG("Examine %s failed", "oslock");
2290 return retval;
2291 }
2292
2293 retval = mem_ap_read_u32(armv8->debug_ap,
2294 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2295 if (retval != ERROR_OK) {
2296 LOG_DEBUG("Examine %s failed", "CPUID");
2297 return retval;
2298 }
2299
2300 retval = mem_ap_read_u32(armv8->debug_ap,
2301 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2302 retval += mem_ap_read_u32(armv8->debug_ap,
2303 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2304 if (retval != ERROR_OK) {
2305 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2306 return retval;
2307 }
2308 retval = mem_ap_read_u32(armv8->debug_ap,
2309 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2310 retval += mem_ap_read_u32(armv8->debug_ap,
2311 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2312 if (retval != ERROR_OK) {
2313 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2314 return retval;
2315 }
2316
2317 retval = dap_run(armv8->debug_ap->dap);
2318 if (retval != ERROR_OK) {
2319 LOG_ERROR("%s: examination failed", target_name(target));
2320 return retval;
2321 }
2322
2323 ttypr |= tmp1;
2324 ttypr = (ttypr << 32) | tmp0;
2325 debug |= tmp3;
2326 debug = (debug << 32) | tmp2;
2327
2328 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2329 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2330 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2331
2332 if (target->private_config == NULL)
2333 return ERROR_FAIL;
2334
2335 pc = (struct aarch64_private_config *)target->private_config;
2336 if (pc->cti == NULL)
2337 return ERROR_FAIL;
2338
2339 armv8->cti = pc->cti;
2340
2341 retval = aarch64_dpm_setup(aarch64, debug);
2342 if (retval != ERROR_OK)
2343 return retval;
2344
2345 /* Setup Breakpoint Register Pairs */
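/* ID_AA64DFR0_EL1.BRPs (bits [15:12]) encodes the number of breakpoint
 * pairs minus one, CTX_CMPs (bits [31:28]) how many of those are
 * context-aware; the context-aware comparators occupy the highest
 * numbered slots, hence the type split in the loop below. */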
2346 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2347 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2348 aarch64->brp_num_available = aarch64->brp_num;
2349 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2350 for (i = 0; i < aarch64->brp_num; i++) {
2351 aarch64->brp_list[i].used = 0;
2352 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2353 aarch64->brp_list[i].type = BRP_NORMAL;
2354 else
2355 aarch64->brp_list[i].type = BRP_CONTEXT;
2356 aarch64->brp_list[i].value = 0;
2357 aarch64->brp_list[i].control = 0;
2358 aarch64->brp_list[i].BRPn = i;
2359 }
2360
2361 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2362
2363 target->state = TARGET_UNKNOWN;
2364 target->debug_reason = DBG_REASON_NOTHALTED;
2365 aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2366 target_set_examined(target);
2367 return ERROR_OK;
2368 }
2369
2370 static int aarch64_examine(struct target *target)
2371 {
2372 int retval = ERROR_OK;
2373
2374 /* don't re-probe hardware after each reset */
2375 if (!target_was_examined(target))
2376 retval = aarch64_examine_first(target);
2377
2378 /* Configure core debug access */
2379 if (retval == ERROR_OK)
2380 retval = aarch64_init_debug_access(target);
2381
2382 return retval;
2383 }
2384
2385 /*
2386 * AArch64 target creation and initialization
2387 */
2388
2389 static int aarch64_init_target(struct command_context *cmd_ctx,
2390 struct target *target)
2391 {
2392 /* examine_first() does a bunch of this */
2393 arm_semihosting_init(target);
2394 return ERROR_OK;
2395 }
2396
2397 static int aarch64_init_arch_info(struct target *target,
2398 struct aarch64_common *aarch64, struct adiv5_dap *dap)
2399 {
2400 struct armv8_common *armv8 = &aarch64->armv8_common;
2401
2402 /* Setup struct aarch64_common */
2403 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2404 armv8->arm.dap = dap;
2405
2406 /* register arch-specific functions */
2407 armv8->examine_debug_reason = NULL;
2408 armv8->post_debug_entry = aarch64_post_debug_entry;
2409 armv8->pre_restore_context = NULL;
2410 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2411
2412 armv8_init_arch_info(target, armv8);
2413 target_register_timer_callback(aarch64_handle_target_request, 1,
2414 TARGET_TIMER_TYPE_PERIODIC, target);
2415
2416 return ERROR_OK;
2417 }
2418
2419 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2420 {
2421 struct aarch64_private_config *pc = target->private_config;
2422 struct aarch64_common *aarch64;
2423
2424 if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2425 return ERROR_FAIL;
2426
2427 aarch64 = calloc(1, sizeof(struct aarch64_common));
2428 if (aarch64 == NULL) {
2429 LOG_ERROR("Out of memory");
2430 return ERROR_FAIL;
2431 }
2432
2433 return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2434 }
2435
2436 static void aarch64_deinit_target(struct target *target)
2437 {
2438 struct aarch64_common *aarch64 = target_to_aarch64(target);
2439 struct armv8_common *armv8 = &aarch64->armv8_common;
2440 struct arm_dpm *dpm = &armv8->dpm;
2441
2442 armv8_free_reg_cache(target);
2443 free(aarch64->brp_list);
2444 free(dpm->dbp);
2445 free(dpm->dwp);
2446 free(target->private_config);
2447 free(aarch64);
2448 }
2449
2450 static int aarch64_mmu(struct target *target, int *enabled)
2451 {
2452 if (target->state != TARGET_HALTED) {
2453 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2454 return ERROR_TARGET_INVALID;
2455 }
2456
2457 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2458 return ERROR_OK;
2459 }
2460
2461 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2462 target_addr_t *phys)
2463 {
2464 return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2465 }
2466
2467 /*
2468 * private target configuration items
2469 */
2470 enum aarch64_cfg_param {
2471 CFG_CTI,
2472 };
2473
2474 static const Jim_Nvp nvp_config_opts[] = {
2475 { .name = "-cti", .value = CFG_CTI },
2476 { .name = NULL, .value = -1 }
2477 };
2478
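/*
 * Parses the target options specific to aarch64, currently only "-cti".
 * A typical configuration (names and base address are illustrative, not
 * taken from a real board file) would be:
 *
 *   cti create $_CHIPNAME.cti0 -dap $_CHIPNAME.dap -ap-num 0 \
 *       -baseaddr 0x80018000
 *   target create $_CHIPNAME.cpu0 aarch64 -dap $_CHIPNAME.dap \
 *       -cti $_CHIPNAME.cti0
 */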
2479 static int aarch64_jim_configure(struct target *target, Jim_GetOptInfo *goi)
2480 {
2481 struct aarch64_private_config *pc;
2482 Jim_Nvp *n;
2483 int e;
2484
2485 pc = (struct aarch64_private_config *)target->private_config;
2486 if (pc == NULL) {
2487 pc = calloc(1, sizeof(struct aarch64_private_config));
2488 target->private_config = pc;
2489 }
2490
2491 /*
2492 * Call adiv5_jim_configure() to parse the common DAP options
2493 * It will return JIM_CONTINUE if it didn't find any known
2494 * options, JIM_OK if it correctly parsed the topmost option
2495 * and JIM_ERR if an error occurred during parameter evaluation.
2496 * For JIM_CONTINUE, we check our own params.
2497 */
2498 e = adiv5_jim_configure(target, goi);
2499 if (e != JIM_CONTINUE)
2500 return e;
2501
2502 /* parse config or cget options ... */
2503 if (goi->argc > 0) {
2504 Jim_SetEmptyResult(goi->interp);
2505
2506 /* check first if topmost item is for us */
2507 e = Jim_Nvp_name2value_obj(goi->interp, nvp_config_opts,
2508 goi->argv[0], &n);
2509 if (e != JIM_OK)
2510 return JIM_CONTINUE;
2511
2512 e = Jim_GetOpt_Obj(goi, NULL);
2513 if (e != JIM_OK)
2514 return e;
2515
2516 switch (n->value) {
2517 case CFG_CTI: {
2518 if (goi->isconfigure) {
2519 Jim_Obj *o_cti;
2520 struct arm_cti *cti;
2521 e = Jim_GetOpt_Obj(goi, &o_cti);
2522 if (e != JIM_OK)
2523 return e;
2524 cti = cti_instance_by_jim_obj(goi->interp, o_cti);
2525 if (cti == NULL) {
2526 Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
2527 return JIM_ERR;
2528 }
2529 pc->cti = cti;
2530 } else {
2531 if (goi->argc != 0) {
2532 Jim_WrongNumArgs(goi->interp,
2533 goi->argc, goi->argv,
2534 "NO PARAMS");
2535 return JIM_ERR;
2536 }
2537
2538 if (pc == NULL || pc->cti == NULL) {
2539 Jim_SetResultString(goi->interp, "CTI not configured", -1);
2540 return JIM_ERR;
2541 }
2542 Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
2543 }
2544 break;
2545 }
2546
2547 default:
2548 return JIM_CONTINUE;
2549 }
2550 }
2551
2552 return JIM_OK;
2553 }
2554
2555 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2556 {
2557 struct target *target = get_current_target(CMD_CTX);
2558 struct armv8_common *armv8 = target_to_armv8(target);
2559
2560 return armv8_handle_cache_info_command(CMD,
2561 &armv8->armv8_mmu.armv8_cache);
2562 }
2563
2564
2565 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2566 {
2567 struct target *target = get_current_target(CMD_CTX);
2568 if (!target_was_examined(target)) {
2569 LOG_ERROR("target not examined yet");
2570 return ERROR_FAIL;
2571 }
2572
2573 return aarch64_init_debug_access(target);
2574 }
2575
2576 COMMAND_HANDLER(aarch64_mask_interrupts_command)
2577 {
2578 struct target *target = get_current_target(CMD_CTX);
2579 struct aarch64_common *aarch64 = target_to_aarch64(target);
2580
2581 static const Jim_Nvp nvp_maskisr_modes[] = {
2582 { .name = "off", .value = AARCH64_ISRMASK_OFF },
2583 { .name = "on", .value = AARCH64_ISRMASK_ON },
2584 { .name = NULL, .value = -1 },
2585 };
2586 const Jim_Nvp *n;
2587
2588 if (CMD_ARGC > 0) {
2589 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2590 if (n->name == NULL) {
2591 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2592 return ERROR_COMMAND_SYNTAX_ERROR;
2593 }
2594
2595 aarch64->isrmasking_mode = n->value;
2596 }
2597
2598 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
2599 command_print(CMD, "aarch64 interrupt mask %s", n->name);
2600
2601 return ERROR_OK;
2602 }
2603
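/*
 * Tcl access to AArch32 coprocessor registers. Example (SCTLR encoding
 * shown for illustration):
 *
 *   <target> mrc 15 0 1 0 0          ;# read  p15 SCTLR
 *   <target> mcr 15 0 1 0 0 $value   ;# write p15 SCTLR
 *
 * Parameter order is cpnum op1 CRn CRm op2 [value].
 */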
2604 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
2605 {
2606 struct command_context *context;
2607 struct target *target;
2608 struct arm *arm;
2609 int retval;
2610 bool is_mcr = false;
2611 int arg_cnt = 0;
2612
2613 if (Jim_CompareStringImmediate(interp, argv[0], "mcr")) {
2614 is_mcr = true;
2615 arg_cnt = 7;
2616 } else {
2617 arg_cnt = 6;
2618 }
2619
2620 context = current_command_context(interp);
2621 assert(context != NULL);
2622
2623 target = get_current_target(context);
2624 if (target == NULL) {
2625 LOG_ERROR("%s: no current target", __func__);
2626 return JIM_ERR;
2627 }
2628 if (!target_was_examined(target)) {
2629 LOG_ERROR("%s: not yet examined", target_name(target));
2630 return JIM_ERR;
2631 }
2632
2633 arm = target_to_arm(target);
2634 if (!is_arm(arm)) {
2635 LOG_ERROR("%s: not an ARM", target_name(target));
2636 return JIM_ERR;
2637 }
2638
2639 if (target->state != TARGET_HALTED)
2640 return JIM_ERR; /* Jim handlers must return JIM_* codes, not OpenOCD error codes */
2641
2642 if (arm->core_state == ARM_STATE_AARCH64) {
2643 LOG_ERROR("%s: not a 32-bit ARM target", target_name(target));
2644 return JIM_ERR;
2645 }
2646
2647 if (argc != arg_cnt) {
2648 LOG_ERROR("%s: wrong number of arguments", __func__);
2649 return JIM_ERR;
2650 }
2651
2652 int cpnum;
2653 uint32_t op1;
2654 uint32_t op2;
2655 uint32_t CRn;
2656 uint32_t CRm;
2657 uint32_t value;
2658 long l;
2659
2660 /* NOTE: parameter sequence matches ARM instruction set usage:
2661 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
2662 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
2663 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
2664 */
2665 retval = Jim_GetLong(interp, argv[1], &l);
2666 if (retval != JIM_OK)
2667 return retval;
2668 if (l & ~0xf) {
2669 LOG_ERROR("%s: %s %d out of range", __func__,
2670 "coprocessor", (int) l);
2671 return JIM_ERR;
2672 }
2673 cpnum = l;
2674
2675 retval = Jim_GetLong(interp, argv[2], &l);
2676 if (retval != JIM_OK)
2677 return retval;
2678 if (l & ~0x7) {
2679 LOG_ERROR("%s: %s %d out of range", __func__,
2680 "op1", (int) l);
2681 return JIM_ERR;
2682 }
2683 op1 = l;
2684
2685 retval = Jim_GetLong(interp, argv[3], &l);
2686 if (retval != JIM_OK)
2687 return retval;
2688 if (l & ~0xf) {
2689 LOG_ERROR("%s: %s %d out of range", __func__,
2690 "CRn", (int) l);
2691 return JIM_ERR;
2692 }
2693 CRn = l;
2694
2695 retval = Jim_GetLong(interp, argv[4], &l);
2696 if (retval != JIM_OK)
2697 return retval;
2698 if (l & ~0xf) {
2699 LOG_ERROR("%s: %s %d out of range", __func__,
2700 "CRm", (int) l);
2701 return JIM_ERR;
2702 }
2703 CRm = l;
2704
2705 retval = Jim_GetLong(interp, argv[5], &l);
2706 if (retval != JIM_OK)
2707 return retval;
2708 if (l & ~0x7) {
2709 LOG_ERROR("%s: %s %d out of range", __func__,
2710 "op2", (int) l);
2711 return JIM_ERR;
2712 }
2713 op2 = l;
2714
2715 value = 0;
2716
2717 if (is_mcr == true) {
2718 retval = Jim_GetLong(interp, argv[6], &l);
2719 if (retval != JIM_OK)
2720 return retval;
2721 value = l;
2722
2723 /* NOTE: parameters reordered! */
2724 /* ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2) */
2725 retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
2726 if (retval != ERROR_OK)
2727 return JIM_ERR;
2728 } else {
2729 /* NOTE: parameters reordered! */
2730 /* ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2) */
2731 retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
2732 if (retval != ERROR_OK)
2733 return JIM_ERR;
2734
2735 Jim_SetResult(interp, Jim_NewIntObj(interp, value));
2736 }
2737
2738 return JIM_OK;
2739 }
2740
2741 static const struct command_registration aarch64_exec_command_handlers[] = {
2742 {
2743 .name = "cache_info",
2744 .handler = aarch64_handle_cache_info_command,
2745 .mode = COMMAND_EXEC,
2746 .help = "display information about target caches",
2747 .usage = "",
2748 },
2749 {
2750 .name = "dbginit",
2751 .handler = aarch64_handle_dbginit_command,
2752 .mode = COMMAND_EXEC,
2753 .help = "Initialize core debug",
2754 .usage = "",
2755 },
2756 {
2757 .name = "maskisr",
2758 .handler = aarch64_mask_interrupts_command,
2759 .mode = COMMAND_ANY,
2760 .help = "mask aarch64 interrupts during single-step",
2761 .usage = "['on'|'off']",
2762 },
2763 {
2764 .name = "mcr",
2765 .mode = COMMAND_EXEC,
2766 .jim_handler = jim_mcrmrc,
2767 .help = "write coprocessor register",
2768 .usage = "cpnum op1 CRn CRm op2 value",
2769 },
2770 {
2771 .name = "mrc",
2772 .mode = COMMAND_EXEC,
2773 .jim_handler = jim_mcrmrc,
2774 .help = "read coprocessor register",
2775 .usage = "cpnum op1 CRn CRm op2",
2776 },
2777 {
2778 .chain = smp_command_handlers,
2779 },
2782 COMMAND_REGISTRATION_DONE
2783 };
2784
2785 extern const struct command_registration semihosting_common_handlers[];
2786
2787 static const struct command_registration aarch64_command_handlers[] = {
2788 {
2789 .name = "arm",
2790 .mode = COMMAND_ANY,
2791 .help = "ARM Command Group",
2792 .usage = "",
2793 .chain = semihosting_common_handlers
2794 },
2795 {
2796 .chain = armv8_command_handlers,
2797 },
2798 {
2799 .name = "aarch64",
2800 .mode = COMMAND_ANY,
2801 .help = "Aarch64 command group",
2802 .usage = "",
2803 .chain = aarch64_exec_command_handlers,
2804 },
2805 COMMAND_REGISTRATION_DONE
2806 };
2807
2808 struct target_type aarch64_target = {
2809 .name = "aarch64",
2810
2811 .poll = aarch64_poll,
2812 .arch_state = armv8_arch_state,
2813
2814 .halt = aarch64_halt,
2815 .resume = aarch64_resume,
2816 .step = aarch64_step,
2817
2818 .assert_reset = aarch64_assert_reset,
2819 .deassert_reset = aarch64_deassert_reset,
2820
2821 /* REVISIT allow exporting VFP3 registers ... */
2822 .get_gdb_arch = armv8_get_gdb_arch,
2823 .get_gdb_reg_list = armv8_get_gdb_reg_list,
2824
2825 .read_memory = aarch64_read_memory,
2826 .write_memory = aarch64_write_memory,
2827
2828 .add_breakpoint = aarch64_add_breakpoint,
2829 .add_context_breakpoint = aarch64_add_context_breakpoint,
2830 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2831 .remove_breakpoint = aarch64_remove_breakpoint,
2832 .add_watchpoint = NULL,
2833 .remove_watchpoint = NULL,
2834
2835 .commands = aarch64_command_handlers,
2836 .target_create = aarch64_target_create,
2837 .target_jim_configure = aarch64_jim_configure,
2838 .init_target = aarch64_init_target,
2839 .deinit_target = aarch64_deinit_target,
2840 .examine = aarch64_examine,
2841
2842 .read_phys_memory = aarch64_read_phys_memory,
2843 .write_phys_memory = aarch64_write_phys_memory,
2844 .mmu = aarch64_mmu,
2845 .virt2phys = aarch64_virt2phys,
2846 };
