target/aarch64: add missing aarch64_poll() calls
[openocd.git] / src / target / aarch64.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2015 by David Ung *
5 * *
6 ***************************************************************************/
7
8 #ifdef HAVE_CONFIG_H
9 #include "config.h"
10 #endif
11
12 #include "breakpoints.h"
13 #include "aarch64.h"
14 #include "a64_disassembler.h"
15 #include "register.h"
16 #include "target_request.h"
17 #include "target_type.h"
18 #include "armv8_opcodes.h"
19 #include "armv8_cache.h"
20 #include "arm_coresight.h"
21 #include "arm_semihosting.h"
22 #include "jtag/interface.h"
23 #include "smp.h"
24 #include <helper/nvp.h>
25 #include <helper/time_support.h>
26
27 enum restart_mode {
28 RESTART_LAZY,
29 RESTART_SYNC,
30 };
31
32 enum halt_mode {
33 HALT_LAZY,
34 HALT_SYNC,
35 };
36
37 struct aarch64_private_config {
38 struct adiv5_private_config adiv5_config;
39 struct arm_cti *cti;
40 };
41
42 static int aarch64_poll(struct target *target);
43 static int aarch64_debug_entry(struct target *target);
44 static int aarch64_restore_context(struct target *target, bool bpwp);
45 static int aarch64_set_breakpoint(struct target *target,
46 struct breakpoint *breakpoint, uint8_t matchmode);
47 static int aarch64_set_context_breakpoint(struct target *target,
48 struct breakpoint *breakpoint, uint8_t matchmode);
49 static int aarch64_set_hybrid_breakpoint(struct target *target,
50 struct breakpoint *breakpoint);
51 static int aarch64_unset_breakpoint(struct target *target,
52 struct breakpoint *breakpoint);
53 static int aarch64_mmu(struct target *target, int *enabled);
54 static int aarch64_virt2phys(struct target *target,
55 target_addr_t virt, target_addr_t *phys);
56 static int aarch64_read_cpu_memory(struct target *target,
57 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
58
59 static int aarch64_restore_system_control_reg(struct target *target)
60 {
61 enum arm_mode target_mode = ARM_MODE_ANY;
62 int retval = ERROR_OK;
63 uint32_t instr;
64
65 struct aarch64_common *aarch64 = target_to_aarch64(target);
66 struct armv8_common *armv8 = target_to_armv8(target);
67
68 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
69 aarch64->system_control_reg_curr = aarch64->system_control_reg;
70 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
71
72 switch (armv8->arm.core_mode) {
73 case ARMV8_64_EL0T:
74 target_mode = ARMV8_64_EL1H;
75 /* fall through */
76 case ARMV8_64_EL1T:
77 case ARMV8_64_EL1H:
78 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
79 break;
80 case ARMV8_64_EL2T:
81 case ARMV8_64_EL2H:
82 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
83 break;
84 case ARMV8_64_EL3H:
85 case ARMV8_64_EL3T:
86 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
87 break;
88
89 case ARM_MODE_SVC:
90 case ARM_MODE_ABT:
91 case ARM_MODE_FIQ:
92 case ARM_MODE_IRQ:
93 case ARM_MODE_HYP:
94 case ARM_MODE_UND:
95 case ARM_MODE_SYS:
96 instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
97 break;
98
99 default:
100 LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
101 armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
102 return ERROR_FAIL;
103 }
104
105 if (target_mode != ARM_MODE_ANY)
106 armv8_dpm_modeswitch(&armv8->dpm, target_mode);
107
108 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
109 if (retval != ERROR_OK)
110 return retval;
111
112 if (target_mode != ARM_MODE_ANY)
113 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
114 }
115
116 return retval;
117 }
118
119 /* modify system_control_reg to enable or disable the MMU for:
120 * - virt2phys address conversion
121 * - reading or writing memory at physical or virtual addresses */
122 static int aarch64_mmu_modify(struct target *target, int enable)
123 {
124 struct aarch64_common *aarch64 = target_to_aarch64(target);
125 struct armv8_common *armv8 = &aarch64->armv8_common;
126 int retval = ERROR_OK;
127 enum arm_mode target_mode = ARM_MODE_ANY;
128 uint32_t instr = 0;
129
130 if (enable) {
131 /* the MMU can only be re-enabled if it was enabled when the target stopped */
132 if (!(aarch64->system_control_reg & 0x1U)) {
133 LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
134 return ERROR_FAIL;
135 }
136 if (!(aarch64->system_control_reg_curr & 0x1U))
137 aarch64->system_control_reg_curr |= 0x1U;
138 } else {
139 if (aarch64->system_control_reg_curr & 0x4U) {
140 /* data cache is active */
141 aarch64->system_control_reg_curr &= ~0x4U;
142 /* flush data cache armv8 function to be called */
143 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
144 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
145 }
146 if ((aarch64->system_control_reg_curr & 0x1U)) {
147 aarch64->system_control_reg_curr &= ~0x1U;
148 }
149 }
150
151 switch (armv8->arm.core_mode) {
152 case ARMV8_64_EL0T:
153 target_mode = ARMV8_64_EL1H;
154 /* fall through */
155 case ARMV8_64_EL1T:
156 case ARMV8_64_EL1H:
157 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
158 break;
159 case ARMV8_64_EL2T:
160 case ARMV8_64_EL2H:
161 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
162 break;
163 case ARMV8_64_EL3H:
164 case ARMV8_64_EL3T:
165 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
166 break;
167
168 case ARM_MODE_SVC:
169 case ARM_MODE_ABT:
170 case ARM_MODE_FIQ:
171 case ARM_MODE_IRQ:
172 case ARM_MODE_HYP:
173 case ARM_MODE_UND:
174 case ARM_MODE_SYS:
175 instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
176 break;
177
178 default:
179 LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_mode);
180 break;
181 }
182 if (target_mode != ARM_MODE_ANY)
183 armv8_dpm_modeswitch(&armv8->dpm, target_mode);
184
185 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
186 aarch64->system_control_reg_curr);
187
188 if (target_mode != ARM_MODE_ANY)
189 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
190
191 return retval;
192 }
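
/*
 * A minimal usage sketch (hypothetical helper, not part of this file) of how
 * the physical access paths are expected to use aarch64_mmu_modify(): bracket
 * the access with an MMU-off/MMU-on pair, assuming the MMU was enabled when
 * the target stopped (aarch64_mmu_modify() refuses to re-enable it otherwise):
 *
 *   static int aarch64_read_phys_sketch(struct target *target,
 *       target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
 *   {
 *       int retval = aarch64_mmu_modify(target, 0);    // MMU off for phys access
 *       if (retval != ERROR_OK)
 *           return retval;
 *       retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
 *       // restore the MMU so system_control_reg_curr stays consistent
 *       int mmu_retval = aarch64_mmu_modify(target, 1);
 *       return (retval != ERROR_OK) ? retval : mmu_retval;
 *   }
 */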
193
194 /*
195 * Basic debug access, very low level; assumes the core state has been saved
196 */
197 static int aarch64_init_debug_access(struct target *target)
198 {
199 struct armv8_common *armv8 = target_to_armv8(target);
200 int retval;
201 uint32_t dummy;
202
203 LOG_DEBUG("%s", target_name(target));
204
205 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
206 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
207 if (retval != ERROR_OK) {
208 LOG_DEBUG("Examine %s failed", "oslock");
209 return retval;
210 }
211
212 /* Clear Sticky Power Down status Bit in PRSR to enable access to
213 the registers in the Core Power Domain */
214 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
215 armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
216 if (retval != ERROR_OK)
217 return retval;
218
219 /*
220 * Static CTI configuration:
221 * Channel 0 -> trigger outputs HALT request to PE
222 * Channel 1 -> trigger outputs Resume request to PE
223 * Gate all channel trigger events from entering the CTM
224 */
225
226 /* Enable CTI */
227 retval = arm_cti_enable(armv8->cti, true);
228 /* By default, gate all channel events to and from the CTM */
229 if (retval == ERROR_OK)
230 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
231 /* output halt requests to PE on channel 0 event */
232 if (retval == ERROR_OK)
233 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
234 /* output restart requests to PE on channel 1 event */
235 if (retval == ERROR_OK)
236 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
237 if (retval != ERROR_OK)
238 return retval;
239
240 /* Resync breakpoint registers */
241
242 return ERROR_OK;
243 }
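
/*
 * With the static CTI routing configured above, halting a PE reduces to a
 * pulse on channel 0, as aarch64_halt_one() below does. A minimal sketch;
 * the ungate step is only needed when the event must also reach other PEs
 * through the trigger matrix (SMP):
 *
 *   retval = arm_cti_ungate_channel(armv8->cti, 0);    // let ch0 enter the CTM
 *   if (retval == ERROR_OK)
 *       retval = arm_cti_pulse_channel(armv8->cti, 0); // CTI_OUTEN0 -> halt req
 */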
244
245 /* Write to memory mapped registers directly with no cache or mmu handling */
246 static int aarch64_dap_write_memap_register_u32(struct target *target,
247 target_addr_t address,
248 uint32_t value)
249 {
250 int retval;
251 struct armv8_common *armv8 = target_to_armv8(target);
252
253 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
254
255 return retval;
256 }
257
258 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
259 {
260 struct arm_dpm *dpm = &a8->armv8_common.dpm;
261 int retval;
262
263 dpm->arm = &a8->armv8_common.arm;
264 dpm->didr = debug;
265
266 retval = armv8_dpm_setup(dpm);
267 if (retval == ERROR_OK)
268 retval = armv8_dpm_initialize(dpm);
269
270 return retval;
271 }
272
273 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
274 {
275 struct armv8_common *armv8 = target_to_armv8(target);
276 return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
277 }
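
/*
 * armv8_set_dbgreg_bits() is assumed to perform a read-modify-write on the
 * debug register, roughly equivalent to this sketch:
 *
 *   uint32_t dscr;
 *   retval = mem_ap_read_atomic_u32(armv8->debug_ap,
 *           armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
 *   if (retval == ERROR_OK) {
 *       dscr = (dscr & ~bit_mask) | (value & bit_mask);
 *       retval = mem_ap_write_atomic_u32(armv8->debug_ap,
 *               armv8->debug_base + CPUV8_DBG_DSCR, dscr);
 *   }
 */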
278
279 static int aarch64_check_state_one(struct target *target,
280 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
281 {
282 struct armv8_common *armv8 = target_to_armv8(target);
283 uint32_t prsr;
284 int retval;
285
286 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
287 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
288 if (retval != ERROR_OK)
289 return retval;
290
291 if (p_prsr)
292 *p_prsr = prsr;
293
294 if (p_result)
295 *p_result = (prsr & mask) == (val & mask);
296
297 return ERROR_OK;
298 }
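
/*
 * The mask/val pair selects which PRSR bits to test: waiting for a halt uses
 * (PRSR_HALT, PRSR_HALT), waiting for a restart uses (PRSR_SDR, PRSR_SDR).
 * For example:
 *
 *   int halted;
 *   retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, NULL);
 */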
299
300 static int aarch64_wait_halt_one(struct target *target)
301 {
302 int retval = ERROR_OK;
303 uint32_t prsr;
304
305 int64_t then = timeval_ms();
306 for (;;) {
307 int halted;
308
309 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
310 if (retval != ERROR_OK || halted)
311 break;
312
313 if (timeval_ms() > then + 1000) {
314 retval = ERROR_TARGET_TIMEOUT;
315 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
316 break;
317 }
318 }
319 return retval;
320 }
321
322 static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
323 {
324 int retval = ERROR_OK;
325 struct target_list *head;
326 struct target *first = NULL;
327
328 LOG_DEBUG("target %s exc %i", target_name(target), exc_target);
329
330 foreach_smp_target(head, target->smp_targets) {
331 struct target *curr = head->target;
332 struct armv8_common *armv8 = target_to_armv8(curr);
333
334 if (exc_target && curr == target)
335 continue;
336 if (!target_was_examined(curr))
337 continue;
338 if (curr->state != TARGET_RUNNING)
339 continue;
340
341 /* HACK: mark this target as prepared for halting */
342 curr->debug_reason = DBG_REASON_DBGRQ;
343
344 /* open the gate for channel 0 to let HALT requests pass to the CTM */
345 retval = arm_cti_ungate_channel(armv8->cti, 0);
346 if (retval == ERROR_OK)
347 retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
348 if (retval != ERROR_OK)
349 break;
350
351 LOG_DEBUG("target %s prepared", target_name(curr));
352
353 if (!first)
354 first = curr;
355 }
356
357 if (p_first) {
358 if (exc_target && first)
359 *p_first = first;
360 else
361 *p_first = target;
362 }
363
364 return retval;
365 }
366
367 static int aarch64_halt_one(struct target *target, enum halt_mode mode)
368 {
369 int retval = ERROR_OK;
370 struct armv8_common *armv8 = target_to_armv8(target);
371
372 LOG_DEBUG("%s", target_name(target));
373
374 /* allow Halting Debug Mode */
375 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
376 if (retval != ERROR_OK)
377 return retval;
378
379 /* trigger an event on channel 0, this outputs a halt request to the PE */
380 retval = arm_cti_pulse_channel(armv8->cti, 0);
381 if (retval != ERROR_OK)
382 return retval;
383
384 if (mode == HALT_SYNC) {
385 retval = aarch64_wait_halt_one(target);
386 if (retval != ERROR_OK) {
387 if (retval == ERROR_TARGET_TIMEOUT)
388 LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
389 return retval;
390 }
391 }
392
393 return ERROR_OK;
394 }
395
396 static int aarch64_halt_smp(struct target *target, bool exc_target)
397 {
398 struct target *next = target;
399 int retval;
400
401 /* prepare halt on all PEs of the group */
402 retval = aarch64_prepare_halt_smp(target, exc_target, &next);
403
404 if (exc_target && next == target)
405 return retval;
406
407 /* halt the target PE */
408 if (retval == ERROR_OK)
409 retval = aarch64_halt_one(next, HALT_LAZY);
410
411 if (retval != ERROR_OK)
412 return retval;
413
414 /* wait for all PEs to halt */
415 int64_t then = timeval_ms();
416 for (;;) {
417 bool all_halted = true;
418 struct target_list *head;
419 struct target *curr;
420
421 foreach_smp_target(head, target->smp_targets) {
422 int halted;
423
424 curr = head->target;
425
426 if (!target_was_examined(curr))
427 continue;
428
429 retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
430 if (retval != ERROR_OK || !halted) {
431 all_halted = false;
432 break;
433 }
434 }
435
436 if (all_halted)
437 break;
438
439 if (timeval_ms() > then + 1000) {
440 retval = ERROR_TARGET_TIMEOUT;
441 break;
442 }
443
444 /*
445 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
446 * and it looks like the CTI's are not connected by a common
447 * trigger matrix. It seems that we need to halt one core in each
448 * cluster explicitly. So if we find that a core has not halted
449 * yet, we trigger an explicit halt for the second cluster.
450 */
451 retval = aarch64_halt_one(curr, HALT_LAZY);
452 if (retval != ERROR_OK)
453 break;
454 }
455
456 return retval;
457 }
458
459 static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
460 {
461 struct target *gdb_target = NULL;
462 struct target_list *head;
463 struct target *curr;
464
465 if (debug_reason == DBG_REASON_NOTHALTED) {
466 LOG_DEBUG("Halting remaining targets in SMP group");
467 aarch64_halt_smp(target, true);
468 }
469
470 /* poll all targets in the group, but skip the target that serves GDB */
471 foreach_smp_target(head, target->smp_targets) {
472 curr = head->target;
473 /* skip calling context */
474 if (curr == target)
475 continue;
476 if (!target_was_examined(curr))
477 continue;
478 /* skip targets that were already halted */
479 if (curr->state == TARGET_HALTED)
480 continue;
481 /* remember the gdb_service->target */
482 if (curr->gdb_service)
483 gdb_target = curr->gdb_service->target;
484 /* skip it */
485 if (curr == gdb_target)
486 continue;
487
488 /* avoid recursion in aarch64_poll() */
489 curr->smp = 0;
490 aarch64_poll(curr);
491 curr->smp = 1;
492 }
493
494 /* after all targets were updated, poll the gdb serving target */
495 if (gdb_target && gdb_target != target)
496 aarch64_poll(gdb_target);
497
498 return ERROR_OK;
499 }
500
501 /*
502 * AArch64 run control
503 */
504
505 static int aarch64_poll(struct target *target)
506 {
507 enum target_state prev_target_state;
508 int retval = ERROR_OK;
509 int halted;
510
511 retval = aarch64_check_state_one(target,
512 PRSR_HALT, PRSR_HALT, &halted, NULL);
513 if (retval != ERROR_OK)
514 return retval;
515
516 if (halted) {
517 prev_target_state = target->state;
518 if (prev_target_state != TARGET_HALTED) {
519 enum target_debug_reason debug_reason = target->debug_reason;
520
521 /* We have a halting debug event */
522 target->state = TARGET_HALTED;
523 LOG_DEBUG("Target %s halted", target_name(target));
524 retval = aarch64_debug_entry(target);
525 if (retval != ERROR_OK)
526 return retval;
527
528 if (target->smp)
529 update_halt_gdb(target, debug_reason);
530
531 if (arm_semihosting(target, &retval) != 0)
532 return retval;
533
534 switch (prev_target_state) {
535 case TARGET_RUNNING:
536 case TARGET_UNKNOWN:
537 case TARGET_RESET:
538 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
539 break;
540 case TARGET_DEBUG_RUNNING:
541 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
542 break;
543 default:
544 break;
545 }
546 }
547 } else
548 target->state = TARGET_RUNNING;
549
550 return retval;
551 }
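
/*
 * A sketch of how the poll routine is typically driven (hypothetical helper;
 * in OpenOCD the periodic polling is done by the generic target layer):
 *
 *   static int wait_halted_sketch(struct target *target, int64_t timeout_ms)
 *   {
 *       int64_t then = timeval_ms();
 *       while (target->state != TARGET_HALTED) {
 *           int retval = aarch64_poll(target);
 *           if (retval != ERROR_OK)
 *               return retval;
 *           if (timeval_ms() > then + timeout_ms)
 *               return ERROR_TARGET_TIMEOUT;
 *       }
 *       return ERROR_OK;
 *   }
 */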
552
553 static int aarch64_halt(struct target *target)
554 {
555 struct armv8_common *armv8 = target_to_armv8(target);
556 armv8->last_run_control_op = ARMV8_RUNCONTROL_HALT;
557
558 if (target->smp)
559 return aarch64_halt_smp(target, false);
560
561 return aarch64_halt_one(target, HALT_SYNC);
562 }
563
564 static int aarch64_restore_one(struct target *target, int current,
565 uint64_t *address, int handle_breakpoints, int debug_execution)
566 {
567 struct armv8_common *armv8 = target_to_armv8(target);
568 struct arm *arm = &armv8->arm;
569 int retval;
570 uint64_t resume_pc;
571
572 LOG_DEBUG("%s", target_name(target));
573
574 if (!debug_execution)
575 target_free_all_working_areas(target);
576
577 /* current = 1: continue on current pc, otherwise continue at <address> */
578 resume_pc = buf_get_u64(arm->pc->value, 0, 64);
579 if (!current)
580 resume_pc = *address;
581 else
582 *address = resume_pc;
583
584 /* Make sure that the ARMv7 gdb thumb fixup does not
585 * kill the return address
586 */
587 switch (arm->core_state) {
588 case ARM_STATE_ARM:
589 resume_pc &= 0xFFFFFFFC;
590 break;
591 case ARM_STATE_AARCH64:
592 resume_pc &= 0xFFFFFFFFFFFFFFFCULL;
593 break;
594 case ARM_STATE_THUMB:
595 case ARM_STATE_THUMB_EE:
596 /* When the return address is loaded into PC
597 * bit 0 must be 1 to stay in Thumb state
598 */
599 resume_pc |= 0x1;
600 break;
601 case ARM_STATE_JAZELLE:
602 LOG_ERROR("How do I resume into Jazelle state??");
603 return ERROR_FAIL;
604 }
605 LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
606 buf_set_u64(arm->pc->value, 0, 64, resume_pc);
607 arm->pc->dirty = true;
608 arm->pc->valid = true;
609
610 /* call this now, before restoring the context, because it uses cpu
611 * register r0 to restore the system control register */
612 retval = aarch64_restore_system_control_reg(target);
613 if (retval == ERROR_OK)
614 retval = aarch64_restore_context(target, handle_breakpoints);
615
616 return retval;
617 }
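
/*
 * Worked example of the resume-PC fixup above: in AArch64 state a PC of
 * 0x0000000080001003 is masked to 0x0000000080001000 (A64 instructions are
 * 4-byte aligned), while in Thumb state 0x80001000 becomes 0x80001001, so
 * that bit 0 keeps the core in Thumb state when the PC is written back.
 */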
618
619 /**
620 * prepare a single target for restart
621 */
624 static int aarch64_prepare_restart_one(struct target *target)
625 {
626 struct armv8_common *armv8 = target_to_armv8(target);
627 int retval;
628 uint32_t dscr;
629 uint32_t tmp;
630
631 LOG_DEBUG("%s", target_name(target));
632
633 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
634 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
635 if (retval != ERROR_OK)
636 return retval;
637
638 if ((dscr & DSCR_ITE) == 0)
639 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
640 if ((dscr & DSCR_ERR) != 0)
641 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
642
643 /* acknowledge a pending CTI halt event */
644 retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
645 /*
646 * open the CTI gate for channel 1 so that the restart events
647 * get passed along to all PEs. Also close gate for channel 0
648 * to isolate the PE from halt events.
649 */
650 if (retval == ERROR_OK)
651 retval = arm_cti_ungate_channel(armv8->cti, 1);
652 if (retval == ERROR_OK)
653 retval = arm_cti_gate_channel(armv8->cti, 0);
654
655 /* make sure that DSCR.HDE is set */
656 if (retval == ERROR_OK) {
657 dscr |= DSCR_HDE;
658 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
659 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
660 }
661
662 if (retval == ERROR_OK) {
663 /* clear sticky bits in PRSR, SDR is now 0 */
664 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
665 armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
666 }
667
668 return retval;
669 }
670
671 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
672 {
673 struct armv8_common *armv8 = target_to_armv8(target);
674 int retval;
675
676 LOG_DEBUG("%s", target_name(target));
677
678 /* trigger an event on channel 1, generates a restart request to the PE */
679 retval = arm_cti_pulse_channel(armv8->cti, 1);
680 if (retval != ERROR_OK)
681 return retval;
682
683 if (mode == RESTART_SYNC) {
684 int64_t then = timeval_ms();
685 for (;;) {
686 int resumed;
687 /*
688 * if PRSR.SDR is set now, the target did restart, even
689 * if it's now already halted again (e.g. due to breakpoint)
690 */
691 retval = aarch64_check_state_one(target,
692 PRSR_SDR, PRSR_SDR, &resumed, NULL);
693 if (retval != ERROR_OK || resumed)
694 break;
695
696 if (timeval_ms() > then + 1000) {
697 LOG_ERROR("%s: Timeout waiting for resume"PRIx32, target_name(target));
698 retval = ERROR_TARGET_TIMEOUT;
699 break;
700 }
701 }
702 }
703
704 if (retval != ERROR_OK)
705 return retval;
706
707 target->debug_reason = DBG_REASON_NOTHALTED;
708 target->state = TARGET_RUNNING;
709
710 return ERROR_OK;
711 }
712
713 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
714 {
715 int retval;
716
717 LOG_DEBUG("%s", target_name(target));
718
719 retval = aarch64_prepare_restart_one(target);
720 if (retval == ERROR_OK)
721 retval = aarch64_do_restart_one(target, mode);
722
723 return retval;
724 }
725
726 /*
727 * prepare all but the current target for restart
728 */
729 static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
730 {
731 int retval = ERROR_OK;
732 struct target_list *head;
733 struct target *first = NULL;
734 uint64_t address;
735
736 foreach_smp_target(head, target->smp_targets) {
737 struct target *curr = head->target;
738
739 /* skip calling target */
740 if (curr == target)
741 continue;
742 if (!target_was_examined(curr))
743 continue;
744 if (curr->state != TARGET_HALTED)
745 continue;
746
747 /* resume at current address, not in step mode */
748 retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
749 if (retval == ERROR_OK)
750 retval = aarch64_prepare_restart_one(curr);
751 if (retval != ERROR_OK) {
752 LOG_ERROR("failed to restore target %s", target_name(curr));
753 break;
754 }
755 /* remember the first valid target in the group */
756 if (!first)
757 first = curr;
758 }
759
760 if (p_first)
761 *p_first = first;
762
763 return retval;
764 }
765
766
767 static int aarch64_step_restart_smp(struct target *target)
768 {
769 int retval = ERROR_OK;
770 struct target_list *head;
771 struct target *first = NULL;
772
773 LOG_DEBUG("%s", target_name(target));
774
775 retval = aarch64_prep_restart_smp(target, 0, &first);
776 if (retval != ERROR_OK)
777 return retval;
778
779 if (first)
780 retval = aarch64_do_restart_one(first, RESTART_LAZY);
781 if (retval != ERROR_OK) {
782 LOG_DEBUG("error restarting target %s", target_name(first));
783 return retval;
784 }
785
786 int64_t then = timeval_ms();
787 for (;;) {
788 struct target *curr = target;
789 bool all_resumed = true;
790
791 foreach_smp_target(head, target->smp_targets) {
792 uint32_t prsr;
793 int resumed;
794
795 curr = head->target;
796
797 if (curr == target)
798 continue;
799
800 if (!target_was_examined(curr))
801 continue;
802
803 retval = aarch64_check_state_one(curr,
804 PRSR_SDR, PRSR_SDR, &resumed, &prsr);
805 if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
806 all_resumed = false;
807 break;
808 }
809
810 if (curr->state != TARGET_RUNNING) {
811 curr->state = TARGET_RUNNING;
812 curr->debug_reason = DBG_REASON_NOTHALTED;
813 target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
814 }
815 }
816
817 if (all_resumed)
818 break;
819
820 if (timeval_ms() > then + 1000) {
821 LOG_ERROR("%s: timeout waiting for target resume", __func__);
822 retval = ERROR_TARGET_TIMEOUT;
823 break;
824 }
825 /*
826 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
827 * and it looks like the CTI's are not connected by a common
828 * trigger matrix. It seems that we need to restart one core in each
829 * cluster explicitly. So if we find that a core has not resumed
830 * yet, we trigger an explicit resume for the second cluster.
831 */
832 retval = aarch64_do_restart_one(curr, RESTART_LAZY);
833 if (retval != ERROR_OK)
834 break;
835 }
836
837 return retval;
838 }
839
840 static int aarch64_resume(struct target *target, int current,
841 target_addr_t address, int handle_breakpoints, int debug_execution)
842 {
843 int retval = 0;
844 uint64_t addr = address;
845
846 struct armv8_common *armv8 = target_to_armv8(target);
847 armv8->last_run_control_op = ARMV8_RUNCONTROL_RESUME;
848
849 if (target->state != TARGET_HALTED) {
850 LOG_TARGET_ERROR(target, "not halted");
851 return ERROR_TARGET_NOT_HALTED;
852 }
853
854 /*
855 * If this target is part of an SMP group, prepare the other
856 * targets for resuming. This involves restoring the complete
857 * target register context and setting up CTI gates to accept
858 * resume events from the trigger matrix.
859 */
860 if (target->smp) {
861 retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
862 if (retval != ERROR_OK)
863 return retval;
864 }
865
866 /* all targets prepared, restore and restart the current target */
867 retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
868 debug_execution);
869 if (retval == ERROR_OK)
870 retval = aarch64_restart_one(target, RESTART_SYNC);
871 if (retval != ERROR_OK)
872 return retval;
873
874 if (target->smp) {
875 int64_t then = timeval_ms();
876 for (;;) {
877 struct target *curr = target;
878 struct target_list *head;
879 bool all_resumed = true;
880
881 foreach_smp_target(head, target->smp_targets) {
882 uint32_t prsr;
883 int resumed;
884
885 curr = head->target;
886 if (curr == target)
887 continue;
888 if (!target_was_examined(curr))
889 continue;
890
891 retval = aarch64_check_state_one(curr,
892 PRSR_SDR, PRSR_SDR, &resumed, &prsr);
893 if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
894 all_resumed = false;
895 break;
896 }
897
898 if (curr->state != TARGET_RUNNING) {
899 curr->state = TARGET_RUNNING;
900 curr->debug_reason = DBG_REASON_NOTHALTED;
901 target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
902 }
903 }
904
905 if (all_resumed)
906 break;
907
908 if (timeval_ms() > then + 1000) {
909 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
910 retval = ERROR_TARGET_TIMEOUT;
911 break;
912 }
913
914 /*
915 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
916 * and it looks like the CTI's are not connected by a common
917 * trigger matrix. It seems that we need to restart one core in each
918 * cluster explicitly. So if we find that a core has not resumed
919 * yet, we trigger an explicit resume for the second cluster.
920 */
921 retval = aarch64_do_restart_one(curr, RESTART_LAZY);
922 if (retval != ERROR_OK)
923 break;
924 }
925 }
926
927 if (retval != ERROR_OK)
928 return retval;
929
930 target->debug_reason = DBG_REASON_NOTHALTED;
931
932 if (!debug_execution) {
933 target->state = TARGET_RUNNING;
934 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
935 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
936 } else {
937 target->state = TARGET_DEBUG_RUNNING;
938 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
939 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
940 }
941
942 return ERROR_OK;
943 }
944
945 static int aarch64_debug_entry(struct target *target)
946 {
947 int retval = ERROR_OK;
948 struct armv8_common *armv8 = target_to_armv8(target);
949 struct arm_dpm *dpm = &armv8->dpm;
950 enum arm_state core_state;
951 uint32_t dscr;
952
953 /* make sure to clear all sticky errors */
954 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
955 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
956 if (retval == ERROR_OK)
957 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
958 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
959 if (retval == ERROR_OK)
960 retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
961
962 if (retval != ERROR_OK)
963 return retval;
964
965 LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);
966
967 dpm->dscr = dscr;
968 core_state = armv8_dpm_get_core_state(dpm);
969 armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
970 armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
971
972 /* close the CTI gate for all events */
973 if (retval == ERROR_OK)
974 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
975 /* discard async exceptions */
976 if (retval == ERROR_OK)
977 retval = dpm->instr_cpsr_sync(dpm);
978 if (retval != ERROR_OK)
979 return retval;
980
981 /* Examine debug reason */
982 armv8_dpm_report_dscr(dpm, dscr);
983
984 /* save the memory address that triggered the watchpoint */
985 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
986 uint32_t tmp;
987
988 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
989 armv8->debug_base + CPUV8_DBG_EDWAR0, &tmp);
990 if (retval != ERROR_OK)
991 return retval;
992 target_addr_t edwar = tmp;
993
994 /* EDWAR[63:32] has unknown content in aarch32 state */
995 if (core_state == ARM_STATE_AARCH64) {
996 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
997 armv8->debug_base + CPUV8_DBG_EDWAR1, &tmp);
998 if (retval != ERROR_OK)
999 return retval;
1000 edwar |= ((target_addr_t)tmp) << 32;
1001 }
1002
1003 armv8->dpm.wp_addr = edwar;
1004 }
1005
1006 retval = armv8_dpm_read_current_registers(&armv8->dpm);
1007
1008 if (retval == ERROR_OK && armv8->post_debug_entry)
1009 retval = armv8->post_debug_entry(target);
1010
1011 return retval;
1012 }
1013
1014 static int aarch64_post_debug_entry(struct target *target)
1015 {
1016 struct aarch64_common *aarch64 = target_to_aarch64(target);
1017 struct armv8_common *armv8 = &aarch64->armv8_common;
1018 int retval;
1019 enum arm_mode target_mode = ARM_MODE_ANY;
1020 uint32_t instr;
1021
1022 switch (armv8->arm.core_mode) {
1023 case ARMV8_64_EL0T:
1024 target_mode = ARMV8_64_EL1H;
1025 /* fall through */
1026 case ARMV8_64_EL1T:
1027 case ARMV8_64_EL1H:
1028 instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
1029 break;
1030 case ARMV8_64_EL2T:
1031 case ARMV8_64_EL2H:
1032 instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
1033 break;
1034 case ARMV8_64_EL3H:
1035 case ARMV8_64_EL3T:
1036 instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
1037 break;
1038
1039 case ARM_MODE_SVC:
1040 case ARM_MODE_ABT:
1041 case ARM_MODE_FIQ:
1042 case ARM_MODE_IRQ:
1043 case ARM_MODE_HYP:
1044 case ARM_MODE_UND:
1045 case ARM_MODE_SYS:
1046 instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1047 break;
1048
1049 default:
1050 LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
1051 armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
1052 return ERROR_FAIL;
1053 }
1054
1055 if (target_mode != ARM_MODE_ANY)
1056 armv8_dpm_modeswitch(&armv8->dpm, target_mode);
1057
1058 retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
1059 if (retval != ERROR_OK)
1060 return retval;
1061
1062 if (target_mode != ARM_MODE_ANY)
1063 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
1064
1065 LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
1066 aarch64->system_control_reg_curr = aarch64->system_control_reg;
1067
1068 if (armv8->armv8_mmu.armv8_cache.info == -1) {
1069 armv8_identify_cache(armv8);
1070 armv8_read_mpidr(armv8);
1071 }
1072 if (armv8->is_armv8r) {
1073 armv8->armv8_mmu.mmu_enabled = 0;
1074 } else {
1075 armv8->armv8_mmu.mmu_enabled =
1076 (aarch64->system_control_reg & 0x1U) ? 1 : 0;
1077 }
1078 armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
1079 (aarch64->system_control_reg & 0x4U) ? 1 : 0;
1080 armv8->armv8_mmu.armv8_cache.i_cache_enabled =
1081 (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
1082 return ERROR_OK;
1083 }
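
/*
 * Worked example of the SCTLR decode above: for a system control register
 * value of 0x30d0198d, bit 0 (M) = 1, bit 2 (C) = 1 and bit 12 (I) = 1, so
 * the MMU, data/unified cache and instruction cache are all reported enabled:
 *
 *   (0x30d0198d & 0x1U)    != 0   // MMU enabled
 *   (0x30d0198d & 0x4U)    != 0   // data/unified cache enabled
 *   (0x30d0198d & 0x1000U) != 0   // instruction cache enabled
 */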
1084
1085 /*
1086 * single-step a target
1087 */
1088 static int aarch64_step(struct target *target, int current, target_addr_t address,
1089 int handle_breakpoints)
1090 {
1091 struct armv8_common *armv8 = target_to_armv8(target);
1092 struct aarch64_common *aarch64 = target_to_aarch64(target);
1093 int saved_retval = ERROR_OK;
1094 int poll_retval;
1095 int retval;
1096 uint32_t edecr;
1097
1098 armv8->last_run_control_op = ARMV8_RUNCONTROL_STEP;
1099
1100 if (target->state != TARGET_HALTED) {
1101 LOG_TARGET_ERROR(target, "not halted");
1102 return ERROR_TARGET_NOT_HALTED;
1103 }
1104
1105 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1106 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1107 /* make sure EDECR.SS is not set when restoring the register */
1108
1109 if (retval == ERROR_OK) {
1110 edecr &= ~0x4;
1111 /* set EDECR.SS to enter hardware step mode */
1112 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1113 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1114 }
1115 /* disable interrupts while stepping */
1116 if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
1117 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1118 /* bail out if stepping setup has failed */
1119 if (retval != ERROR_OK)
1120 return retval;
1121
1122 if (target->smp && (current == 1)) {
1123 /*
1124 * isolate current target so that it doesn't get resumed
1125 * together with the others
1126 */
1127 retval = arm_cti_gate_channel(armv8->cti, 1);
1128 /* resume all other targets in the group */
1129 if (retval == ERROR_OK)
1130 retval = aarch64_step_restart_smp(target);
1131 if (retval != ERROR_OK) {
1132 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1133 return retval;
1134 }
1135 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1136 }
1137
1138 /* all other targets running, restore and restart the current target */
1139 retval = aarch64_restore_one(target, current, &address, 0, 0);
1140 if (retval == ERROR_OK)
1141 retval = aarch64_restart_one(target, RESTART_LAZY);
1142
1143 if (retval != ERROR_OK)
1144 return retval;
1145
1146 LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
1147 if (!handle_breakpoints)
1148 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1149
1150 int64_t then = timeval_ms();
1151 for (;;) {
1152 int stepped;
1153 uint32_t prsr;
1154
1155 retval = aarch64_check_state_one(target,
1156 PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
1157 if (retval != ERROR_OK || stepped)
1158 break;
1159
1160 if (timeval_ms() > then + 100) {
1161 LOG_ERROR("timeout waiting for target %s halt after step",
1162 target_name(target));
1163 retval = ERROR_TARGET_TIMEOUT;
1164 break;
1165 }
1166 }
1167
1168 /*
1169 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1170 * causes a timeout. The core takes the step but doesn't complete it and so
1171 * debug state is never entered. However, the core can be halted manually,
1172 * as an external debug request event is also a WFI wakeup event.
1173 */
1174 if (retval == ERROR_TARGET_TIMEOUT)
1175 saved_retval = aarch64_halt_one(target, HALT_SYNC);
1176
1177 poll_retval = aarch64_poll(target);
1178
1179 /* restore EDECR */
1180 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1181 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1182 if (retval != ERROR_OK)
1183 return retval;
1184
1185 /* restore interrupts */
1186 if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
1187 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1188 if (retval != ERROR_OK)
1189 return retval;
1190 }
1191
1192 if (saved_retval != ERROR_OK)
1193 return saved_retval;
1194
1195 if (poll_retval != ERROR_OK)
1196 return poll_retval;
1197
1198 return ERROR_OK;
1199 }
1200
1201 static int aarch64_restore_context(struct target *target, bool bpwp)
1202 {
1203 struct armv8_common *armv8 = target_to_armv8(target);
1204 struct arm *arm = &armv8->arm;
1205
1206 int retval;
1207
1208 LOG_DEBUG("%s", target_name(target));
1209
1210 if (armv8->pre_restore_context)
1211 armv8->pre_restore_context(target);
1212
1213 retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1214 if (retval == ERROR_OK) {
1215 /* registers are now invalid */
1216 register_cache_invalidate(arm->core_cache);
1217 register_cache_invalidate(arm->core_cache->next);
1218 }
1219
1220 return retval;
1221 }
1222
1223 /*
1224 * AArch64 breakpoint and watchpoint functions
1225 */
1226
1227 /* Setup hardware Breakpoint Register Pair */
1228 static int aarch64_set_breakpoint(struct target *target,
1229 struct breakpoint *breakpoint, uint8_t matchmode)
1230 {
1231 int retval;
1232 int brp_i = 0;
1233 uint32_t control;
1234 uint8_t byte_addr_select = 0x0F;
1235 struct aarch64_common *aarch64 = target_to_aarch64(target);
1236 struct armv8_common *armv8 = &aarch64->armv8_common;
1237 struct aarch64_brp *brp_list = aarch64->brp_list;
1238
1239 if (breakpoint->is_set) {
1240 LOG_WARNING("breakpoint already set");
1241 return ERROR_OK;
1242 }
1243
1244 if (breakpoint->type == BKPT_HARD) {
1245 int64_t bpt_value;
1246 while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
1247 brp_i++;
1248 if (brp_i >= aarch64->brp_num) {
1249 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1250 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1251 }
1252 breakpoint_hw_set(breakpoint, brp_i);
1253 if (breakpoint->length == 2)
1254 byte_addr_select = (3 << (breakpoint->address & 0x02));
1255 control = ((matchmode & 0x7) << 20)
1256 | (1 << 13)
1257 | (byte_addr_select << 5)
1258 | (3 << 1) | 1;
1259 brp_list[brp_i].used = 1;
1260 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFCULL;
1261 brp_list[brp_i].control = control;
1262 bpt_value = brp_list[brp_i].value;
1263
1264 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1265 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
1266 (uint32_t)(bpt_value & 0xFFFFFFFF));
1267 if (retval != ERROR_OK)
1268 return retval;
1269 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1270 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
1271 (uint32_t)(bpt_value >> 32));
1272 if (retval != ERROR_OK)
1273 return retval;
1274
1275 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1276 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
1277 brp_list[brp_i].control);
1278 if (retval != ERROR_OK)
1279 return retval;
1280 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1281 brp_list[brp_i].control,
1282 brp_list[brp_i].value);
1283
1284 } else if (breakpoint->type == BKPT_SOFT) {
1285 uint32_t opcode;
1286 uint8_t code[4];
1287
1288 if (armv8_dpm_get_core_state(&armv8->dpm) == ARM_STATE_AARCH64) {
1289 opcode = ARMV8_HLT(11);
1290
1291 if (breakpoint->length != 4)
1292 LOG_ERROR("bug: breakpoint length should be 4 in AArch64 mode");
1293 } else {
1294 /**
1295 * core_state is ARM_STATE_ARM;
1296 * the opcode then depends on the breakpoint length:
1297 * - if length == 4 => A32 opcode
1298 * - if length == 2 => T32 opcode
1299 * - if length == 3 => T32 opcode (refer to gdb doc: ARM-Breakpoint-Kinds);
1300 * in that case the length should be changed from 3 to 4 bytes
1301 **/
1302 opcode = (breakpoint->length == 4) ? ARMV8_HLT_A1(11) :
1303 (uint32_t) (ARMV8_HLT_T1(11) | ARMV8_HLT_T1(11) << 16);
1304
1305 if (breakpoint->length == 3)
1306 breakpoint->length = 4;
1307 }
1308
1309 buf_set_u32(code, 0, 32, opcode);
1310
1311 retval = target_read_memory(target,
1312 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1313 breakpoint->length, 1,
1314 breakpoint->orig_instr);
1315 if (retval != ERROR_OK)
1316 return retval;
1317
1318 armv8_cache_d_inner_flush_virt(armv8,
1319 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1320 breakpoint->length);
1321
1322 retval = target_write_memory(target,
1323 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1324 breakpoint->length, 1, code);
1325 if (retval != ERROR_OK)
1326 return retval;
1327
1328 armv8_cache_d_inner_flush_virt(armv8,
1329 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1330 breakpoint->length);
1331
1332 armv8_cache_i_inner_inval_virt(armv8,
1333 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1334 breakpoint->length);
1335
1336 breakpoint->is_set = true;
1337 }
1338
1339 /* Ensure that halting debug mode is enabled */
1340 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1341 if (retval != ERROR_OK) {
1342 LOG_DEBUG("Failed to set DSCR.HDE");
1343 return retval;
1344 }
1345
1346 return ERROR_OK;
1347 }
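
/*
 * Worked example of the byte-address-select computation above: a 2-byte (T32)
 * breakpoint must select the halfword actually addressed. For
 * breakpoint->address = 0x80001002, (address & 0x02) == 2, so
 * byte_addr_select = 3 << 2 = 0xC and the control word enables byte lanes
 * 2 and 3; for an aligned halfword at 0x80001000 it is 3 << 0 = 0x3.
 */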
1348
1349 static int aarch64_set_context_breakpoint(struct target *target,
1350 struct breakpoint *breakpoint, uint8_t matchmode)
1351 {
1352 int retval = ERROR_FAIL;
1353 int brp_i = 0;
1354 uint32_t control;
1355 uint8_t byte_addr_select = 0x0F;
1356 struct aarch64_common *aarch64 = target_to_aarch64(target);
1357 struct armv8_common *armv8 = &aarch64->armv8_common;
1358 struct aarch64_brp *brp_list = aarch64->brp_list;
1359
1360 if (breakpoint->is_set) {
1361 LOG_WARNING("breakpoint already set");
1362 return retval;
1363 }
1364 /*check available context BRPs*/
1365 while ((brp_i < aarch64->brp_num) && (brp_list[brp_i].used ||
1366 (brp_list[brp_i].type != BRP_CONTEXT)))
1367 brp_i++;
1368
1369 if (brp_i >= aarch64->brp_num) {
1370 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1371 return ERROR_FAIL;
1372 }
1373
1374 breakpoint_hw_set(breakpoint, brp_i);
1375 control = ((matchmode & 0x7) << 20)
1376 | (1 << 13)
1377 | (byte_addr_select << 5)
1378 | (3 << 1) | 1;
1379 brp_list[brp_i].used = 1;
1380 brp_list[brp_i].value = (breakpoint->asid);
1381 brp_list[brp_i].control = control;
1382 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1383 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
1384 brp_list[brp_i].value);
1385 if (retval != ERROR_OK)
1386 return retval;
1387 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1388 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
1389 brp_list[brp_i].control);
1390 if (retval != ERROR_OK)
1391 return retval;
1392 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1393 brp_list[brp_i].control,
1394 brp_list[brp_i].value);
1395 return ERROR_OK;
1396
1397 }
1398
1399 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1400 {
1401 int retval = ERROR_FAIL;
1402 int brp_1 = 0; /* holds the contextID pair */
1403 int brp_2 = 0; /* holds the IVA pair */
1404 uint32_t control_ctx, control_iva;
1405 uint8_t ctx_byte_addr_select = 0x0F;
1406 uint8_t iva_byte_addr_select = 0x0F;
1407 uint8_t ctx_machmode = 0x03;
1408 uint8_t iva_machmode = 0x01;
1409 struct aarch64_common *aarch64 = target_to_aarch64(target);
1410 struct armv8_common *armv8 = &aarch64->armv8_common;
1411 struct aarch64_brp *brp_list = aarch64->brp_list;
1412
1413 if (breakpoint->is_set) {
1414 LOG_WARNING("breakpoint already set");
1415 return retval;
1416 }
1417 /*check available context BRPs*/
1418 while ((brp_1 < aarch64->brp_num) && (brp_list[brp_1].used ||
1419 (brp_list[brp_1].type != BRP_CONTEXT)))
1420 brp_1++;
1421
1422 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1423 if (brp_1 >= aarch64->brp_num) {
1424 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1425 return ERROR_FAIL;
1426 }
1427
1428 while ((brp_2 < aarch64->brp_num) && (brp_list[brp_2].used ||
1429 (brp_list[brp_2].type != BRP_NORMAL)))
1430 brp_2++;
1431
1432 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1433 if (brp_2 >= aarch64->brp_num) {
1434 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1435 return ERROR_FAIL;
1436 }
1437
1438 breakpoint_hw_set(breakpoint, brp_1);
1439 breakpoint->linked_brp = brp_2;
1440 control_ctx = ((ctx_machmode & 0x7) << 20)
1441 | (brp_2 << 16)
1442 | (0 << 14)
1443 | (ctx_byte_addr_select << 5)
1444 | (3 << 1) | 1;
1445 brp_list[brp_1].used = 1;
1446 brp_list[brp_1].value = (breakpoint->asid);
1447 brp_list[brp_1].control = control_ctx;
1448 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1449 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].brpn,
1450 brp_list[brp_1].value);
1451 if (retval != ERROR_OK)
1452 return retval;
1453 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1454 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].brpn,
1455 brp_list[brp_1].control);
1456 if (retval != ERROR_OK)
1457 return retval;
1458
1459 control_iva = ((iva_machmode & 0x7) << 20)
1460 | (brp_1 << 16)
1461 | (1 << 13)
1462 | (iva_byte_addr_select << 5)
1463 | (3 << 1) | 1;
1464 brp_list[brp_2].used = 1;
1465 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFCULL;
1466 brp_list[brp_2].control = control_iva;
1467 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1468 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].brpn,
1469 brp_list[brp_2].value & 0xFFFFFFFF);
1470 if (retval != ERROR_OK)
1471 return retval;
1472 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1473 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].brpn,
1474 brp_list[brp_2].value >> 32);
1475 if (retval != ERROR_OK)
1476 return retval;
1477 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1478 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].brpn,
1479 brp_list[brp_2].control);
1480 if (retval != ERROR_OK)
1481 return retval;
1482
1483 return ERROR_OK;
1484 }
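
/*
 * Sketch of the breakpoint control word assembled above; the field names
 * follow the Arm architecture manual's DBGBCR layout (an assumption, they are
 * not spelled out in this file):
 *
 *   control = (matchmode & 0x7) << 20    // BT:  breakpoint type
 *           | linked_brp        << 16    // LBN: linked breakpoint number
 *           | 1                 << 13    // HMC: match at all ELs
 *           | byte_addr_select  << 5     // BAS: byte address select
 *           | 3                 << 1     // PMC: privileged and user
 *           | 1;                         // E:   enable
 */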
1485
1486 static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1487 {
1488 int retval;
1489 struct aarch64_common *aarch64 = target_to_aarch64(target);
1490 struct armv8_common *armv8 = &aarch64->armv8_common;
1491 struct aarch64_brp *brp_list = aarch64->brp_list;
1492
1493 if (!breakpoint->is_set) {
1494 LOG_WARNING("breakpoint not set");
1495 return ERROR_OK;
1496 }
1497
1498 if (breakpoint->type == BKPT_HARD) {
1499 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1500 int brp_i = breakpoint->number;
1501 int brp_j = breakpoint->linked_brp;
1502 if (brp_i >= aarch64->brp_num) {
1503 LOG_DEBUG("Invalid BRP number in breakpoint");
1504 return ERROR_OK;
1505 }
1506 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1507 brp_list[brp_i].control, brp_list[brp_i].value);
1508 brp_list[brp_i].used = 0;
1509 brp_list[brp_i].value = 0;
1510 brp_list[brp_i].control = 0;
1511 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1512 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
1513 brp_list[brp_i].control);
1514 if (retval != ERROR_OK)
1515 return retval;
1516 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1517 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
1518 (uint32_t)brp_list[brp_i].value);
1519 if (retval != ERROR_OK)
1520 return retval;
1521 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1522 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
1523 (uint32_t)brp_list[brp_i].value);
1524 if (retval != ERROR_OK)
1525 return retval;
1526 if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
1527 LOG_DEBUG("Invalid BRP number in breakpoint");
1528 return ERROR_OK;
1529 }
1530 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
1531 brp_list[brp_j].control, brp_list[brp_j].value);
1532 brp_list[brp_j].used = 0;
1533 brp_list[brp_j].value = 0;
1534 brp_list[brp_j].control = 0;
1535 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1536 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].brpn,
1537 brp_list[brp_j].control);
1538 if (retval != ERROR_OK)
1539 return retval;
1540 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1541 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].brpn,
1542 (uint32_t)brp_list[brp_j].value);
1543 if (retval != ERROR_OK)
1544 return retval;
1545 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1546 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].brpn,
1547 (uint32_t)brp_list[brp_j].value);
1548 if (retval != ERROR_OK)
1549 return retval;
1550
1551 breakpoint->linked_brp = 0;
1552 breakpoint->is_set = false;
1553 return ERROR_OK;
1554
1555 } else {
1556 int brp_i = breakpoint->number;
1557 if (brp_i >= aarch64->brp_num) {
1558 LOG_DEBUG("Invalid BRP number in breakpoint");
1559 return ERROR_OK;
1560 }
1561 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
1562 brp_list[brp_i].control, brp_list[brp_i].value);
1563 brp_list[brp_i].used = 0;
1564 brp_list[brp_i].value = 0;
1565 brp_list[brp_i].control = 0;
1566 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1567 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
1568 brp_list[brp_i].control);
1569 if (retval != ERROR_OK)
1570 return retval;
1571 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1572 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
1573 brp_list[brp_i].value);
1574 if (retval != ERROR_OK)
1575 return retval;
1576
1577 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1578 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
1579 (uint32_t)brp_list[brp_i].value);
1580 if (retval != ERROR_OK)
1581 return retval;
1582 breakpoint->is_set = false;
1583 return ERROR_OK;
1584 }
1585 } else {
1586 /* restore original instruction (kept in target endianness) */
1587
1588 armv8_cache_d_inner_flush_virt(armv8,
1589 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1590 breakpoint->length);
1591
1592 if (breakpoint->length == 4) {
1593 retval = target_write_memory(target,
1594 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1595 4, 1, breakpoint->orig_instr);
1596 if (retval != ERROR_OK)
1597 return retval;
1598 } else {
1599 retval = target_write_memory(target,
1600 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1601 2, 1, breakpoint->orig_instr);
1602 if (retval != ERROR_OK)
1603 return retval;
1604 }
1605
1606 armv8_cache_d_inner_flush_virt(armv8,
1607 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1608 breakpoint->length);
1609
1610 armv8_cache_i_inner_inval_virt(armv8,
1611 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1612 breakpoint->length);
1613 }
1614 breakpoint->is_set = false;
1615
1616 return ERROR_OK;
1617 }
1618
1619 static int aarch64_add_breakpoint(struct target *target,
1620 struct breakpoint *breakpoint)
1621 {
1622 struct aarch64_common *aarch64 = target_to_aarch64(target);
1623
1624 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1625 LOG_INFO("no hardware breakpoint available");
1626 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1627 }
1628
1629 if (breakpoint->type == BKPT_HARD)
1630 aarch64->brp_num_available--;
1631
1632 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1633 }
1634
1635 static int aarch64_add_context_breakpoint(struct target *target,
1636 struct breakpoint *breakpoint)
1637 {
1638 struct aarch64_common *aarch64 = target_to_aarch64(target);
1639
1640 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1641 LOG_INFO("no hardware breakpoint available");
1642 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1643 }
1644
1645 if (breakpoint->type == BKPT_HARD)
1646 aarch64->brp_num_available--;
1647
1648 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1649 }
1650
1651 static int aarch64_add_hybrid_breakpoint(struct target *target,
1652 struct breakpoint *breakpoint)
1653 {
1654 struct aarch64_common *aarch64 = target_to_aarch64(target);
1655
1656 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1657 LOG_INFO("no hardware breakpoint available");
1658 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1659 }
1660
1661 if (breakpoint->type == BKPT_HARD)
1662 aarch64->brp_num_available--;
1663
1664 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1665 }
1666
1667 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1668 {
1669 struct aarch64_common *aarch64 = target_to_aarch64(target);
1670
1671 #if 0
1672 /* It is perfectly possible to remove breakpoints while the target is running */
1673 if (target->state != TARGET_HALTED) {
1674 LOG_WARNING("target not halted");
1675 return ERROR_TARGET_NOT_HALTED;
1676 }
1677 #endif
1678
1679 if (breakpoint->is_set) {
1680 aarch64_unset_breakpoint(target, breakpoint);
1681 if (breakpoint->type == BKPT_HARD)
1682 aarch64->brp_num_available++;
1683 }
1684
1685 return ERROR_OK;
1686 }
1687
1688 /* Setup hardware Watchpoint Register Pair */
1689 static int aarch64_set_watchpoint(struct target *target,
1690 struct watchpoint *watchpoint)
1691 {
1692 int retval;
1693 int wp_i = 0;
1694 uint32_t control, offset, length;
1695 struct aarch64_common *aarch64 = target_to_aarch64(target);
1696 struct armv8_common *armv8 = &aarch64->armv8_common;
1697 struct aarch64_brp *wp_list = aarch64->wp_list;
1698
1699 if (watchpoint->is_set) {
1700 LOG_WARNING("watchpoint already set");
1701 return ERROR_OK;
1702 }
1703
1704 while ((wp_i < aarch64->wp_num) && wp_list[wp_i].used)
1705 wp_i++;
1706 if (wp_i >= aarch64->wp_num) {
1707 LOG_ERROR("ERROR Can not find free Watchpoint Register Pair");
1708 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1709 }
1710
1711 control = (1 << 0) /* enable */
1712 | (3 << 1) /* both user and privileged access */
1713 | (1 << 13); /* higher mode control */
1714
1715 switch (watchpoint->rw) {
1716 case WPT_READ:
1717 control |= 1 << 3;
1718 break;
1719 case WPT_WRITE:
1720 control |= 2 << 3;
1721 break;
1722 case WPT_ACCESS:
1723 control |= 3 << 3;
1724 break;
1725 }
1726
1727 /* Match up to 8 bytes. */
1728 offset = watchpoint->address & 7;
1729 length = watchpoint->length;
1730 if (offset + length > sizeof(uint64_t)) {
1731 length = sizeof(uint64_t) - offset;
1732 LOG_WARNING("Adjust watchpoint match inside 8-byte boundary");
1733 }
1734 for (; length > 0; offset++, length--)
1735 control |= (1 << offset) << 5;
1736
1737 wp_list[wp_i].value = watchpoint->address & 0xFFFFFFFFFFFFFFF8ULL;
1738 wp_list[wp_i].control = control;
1739
1740 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1741 + CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].brpn,
1742 (uint32_t)(wp_list[wp_i].value & 0xFFFFFFFF));
1743 if (retval != ERROR_OK)
1744 return retval;
1745 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1746 + CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].brpn,
1747 (uint32_t)(wp_list[wp_i].value >> 32));
1748 if (retval != ERROR_OK)
1749 return retval;
1750
1751 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1752 + CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].brpn,
1753 control);
1754 if (retval != ERROR_OK)
1755 return retval;
1756 LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, wp_i,
1757 wp_list[wp_i].control, wp_list[wp_i].value);
1758
1759 /* Ensure that halting debug mode is enabled */
1760 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1761 if (retval != ERROR_OK) {
1762 LOG_DEBUG("Failed to set DSCR.HDE");
1763 return retval;
1764 }
1765
1766 wp_list[wp_i].used = 1;
1767 watchpoint_set(watchpoint, wp_i);
1768
1769 return ERROR_OK;
1770 }
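
/*
 * Worked example of the byte-select loop above: for
 * watchpoint->address = 0x80001006 and length = 4, offset is 6, the match is
 * clipped to 2 bytes (the warning above fires), WVR is programmed with the
 * 8-byte aligned 0x80001000, and the BAS field gets bits 6 and 7, i.e.
 * ((1 << 6) | (1 << 7)) << 5 in the control word.
 */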
1771
1772 /* Clear hardware Watchpoint Register Pair */
1773 static int aarch64_unset_watchpoint(struct target *target,
1774 struct watchpoint *watchpoint)
1775 {
1776 int retval;
1777 struct aarch64_common *aarch64 = target_to_aarch64(target);
1778 struct armv8_common *armv8 = &aarch64->armv8_common;
1779 struct aarch64_brp *wp_list = aarch64->wp_list;
1780
1781 if (!watchpoint->is_set) {
1782 LOG_WARNING("watchpoint not set");
1783 return ERROR_OK;
1784 }
1785
1786 int wp_i = watchpoint->number;
1787 if (wp_i >= aarch64->wp_num) {
1788 LOG_DEBUG("Invalid WP number in watchpoint");
1789 return ERROR_OK;
1790 }
1791 LOG_DEBUG("rwp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, wp_i,
1792 wp_list[wp_i].control, wp_list[wp_i].value);
1793 wp_list[wp_i].used = 0;
1794 wp_list[wp_i].value = 0;
1795 wp_list[wp_i].control = 0;
1796 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1797 + CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].brpn,
1798 wp_list[wp_i].control);
1799 if (retval != ERROR_OK)
1800 return retval;
1801 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1802 + CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].brpn,
1803 wp_list[wp_i].value);
1804 if (retval != ERROR_OK)
1805 return retval;
1806
1807 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1808 + CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].brpn,
1809 (uint32_t)wp_list[wp_i].value);
1810 if (retval != ERROR_OK)
1811 return retval;
1812 watchpoint->is_set = false;
1813
1814 return ERROR_OK;
1815 }
1816
1817 static int aarch64_add_watchpoint(struct target *target,
1818 struct watchpoint *watchpoint)
1819 {
1820 int retval;
1821 struct aarch64_common *aarch64 = target_to_aarch64(target);
1822
1823 if (aarch64->wp_num_available < 1) {
1824 LOG_INFO("no hardware watchpoint available");
1825 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1826 }
1827
1828 retval = aarch64_set_watchpoint(target, watchpoint);
1829 if (retval == ERROR_OK)
1830 aarch64->wp_num_available--;
1831
1832 return retval;
1833 }
1834
1835 static int aarch64_remove_watchpoint(struct target *target,
1836 struct watchpoint *watchpoint)
1837 {
1838 struct aarch64_common *aarch64 = target_to_aarch64(target);
1839
1840 if (watchpoint->is_set) {
1841 aarch64_unset_watchpoint(target, watchpoint);
1842 aarch64->wp_num_available++;
1843 }
1844
1845 return ERROR_OK;
1846 }
1847
1848 /**
1849 * find out which watchpoint hit:
1850 * get the exception address and compare it against the configured watchpoints
1851 */
1852 static int aarch64_hit_watchpoint(struct target *target,
1853 struct watchpoint **hit_watchpoint)
1854 {
1855 if (target->debug_reason != DBG_REASON_WATCHPOINT)
1856 return ERROR_FAIL;
1857
1858 struct armv8_common *armv8 = target_to_armv8(target);
1859
1860 target_addr_t exception_address;
1861 struct watchpoint *wp;
1862
1863 exception_address = armv8->dpm.wp_addr;
1864
1865 if (exception_address == 0xFFFFFFFF)
1866 return ERROR_FAIL;
1867
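	/* Walk the watchpoint list and report the first one whose address
	 * range covers the reported exception address. Illustrative example
	 * (hypothetical values): a 4-byte watchpoint at 0x1000 matches any
	 * exception address in [0x1000, 0x1004). */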
	for (wp = target->watchpoints; wp; wp = wp->next)
		if (exception_address >= wp->address && exception_address < (wp->address + wp->length)) {
			*hit_watchpoint = wp;
			return ERROR_OK;
		}

	return ERROR_FAIL;
}

/*
 * AArch64 Reset functions
 */

static int aarch64_enable_reset_catch(struct target *target, bool enable)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	uint32_t edecr;
	int retval;

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
	LOG_DEBUG("EDECR = 0x%08" PRIx32 ", enable=%d", edecr, enable);
	if (retval != ERROR_OK)
		return retval;

	if (enable)
		edecr |= ECR_RCE;
	else
		edecr &= ~ECR_RCE;

	return mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, edecr);
}

static int aarch64_clear_reset_catch(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	uint32_t edesr;
	int retval;
	bool was_triggered;

	/* check if Reset Catch debug event triggered as expected */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_EDESR, &edesr);
	if (retval != ERROR_OK)
		return retval;

	was_triggered = !!(edesr & ESR_RC);
	LOG_DEBUG("Reset Catch debug event %s",
			was_triggered ? "triggered" : "NOT triggered!");

	if (was_triggered) {
		/* clear pending Reset Catch debug event */
		edesr &= ~ESR_RC;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDESR, edesr);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}

static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	enum reset_types reset_config = jtag_get_reset_config();
	int retval;

	LOG_DEBUG(" ");

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (reset_config & RESET_HAS_SRST) {
		bool srst_asserted = false;

		if (target->reset_halt && !(reset_config & RESET_SRST_PULLS_TRST)) {
			if (target_was_examined(target)) {

				if (reset_config & RESET_SRST_NO_GATING) {
					/*
					 * SRST needs to be asserted *before* Reset Catch
					 * debug event can be set up.
					 */
					adapter_assert_reset();
					srst_asserted = true;
				}

				/* make sure to clear all sticky errors */
				mem_ap_write_atomic_u32(armv8->debug_ap,
						armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);

				/* set up Reset Catch debug event to halt the CPU after reset */
				retval = aarch64_enable_reset_catch(target, true);
				if (retval != ERROR_OK)
					LOG_WARNING("%s: Error enabling Reset Catch debug event; the CPU will not halt immediately after reset!",
							target_name(target));
			} else {
				LOG_WARNING("%s: Target not examined, will not halt immediately after reset!",
						target_name(target));
			}
		}

		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		if (!srst_asserted)
			adapter_assert_reset();
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target)) {
		register_cache_invalidate(armv8->arm.core_cache);
		register_cache_invalidate(armv8->arm.core_cache->next);
	}

	target->state = TARGET_RESET;

	return ERROR_OK;
}

static int aarch64_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	adapter_deassert_reset();

	if (!target_was_examined(target))
		return ERROR_OK;

	retval = aarch64_init_debug_access(target);
	if (retval != ERROR_OK)
		return retval;

	retval = aarch64_poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		/* clear pending Reset Catch debug event */
		retval = aarch64_clear_reset_catch(target);
		if (retval != ERROR_OK)
			LOG_WARNING("%s: Clearing Reset Catch debug event failed",
					target_name(target));

		/* disable Reset Catch debug event */
		retval = aarch64_enable_reset_catch(target, false);
		if (retval != ERROR_OK)
			LOG_WARNING("%s: Disabling Reset Catch debug event failed",
					target_name(target));

		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
					target_name(target));
			if (target_was_examined(target)) {
				retval = aarch64_halt_one(target, HALT_LAZY);
				if (retval != ERROR_OK)
					return retval;
			} else {
				target->state = TARGET_UNKNOWN;
			}
		}
	}

	return ERROR_OK;
}

static int aarch64_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode if necessary */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t data, opcode;

		/* write the data to store into DTRRX */
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		if (arm->core_state == ARM_STATE_AARCH64)
			retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}

static int aarch64_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;

	armv8_reg_current(arm, 1)->dirty = true;

	/* Step 1.d - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 2.a - Do the write */
	retval = mem_ap_write_buf_noincr(armv8->debug_ap,
			buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.a - Switch DTR mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}

static int aarch64_write_cpu_memory(struct target *target,
	uint64_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* write memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRX */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

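	/* Word-sized, word-aligned transfers can use the DCC memory-access
	 * mode of the fast path, which streams one MEM-AP write per word;
	 * everything else falls back to the slow path, which executes one
	 * store instruction per element. */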
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);

	if (retval != ERROR_OK) {
		/* Unset DTR mode */
		mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}

static int aarch64_read_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode (if necessary) */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t opcode, data;

		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		if (arm->core_state == ARM_STATE_AARCH64)
			retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &data);
		if (retval != ERROR_OK)
			return retval;

		if (size == 1)
			*buffer = (uint8_t)data;
		else if (size == 2)
			target_buffer_set_u16(target, buffer, (uint16_t)data);
		else
			target_buffer_set_u32(target, buffer, data);

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}

static int aarch64_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t value;

	/* Mark X1 as dirty */
	armv8_reg_current(arm, 1)->dirty = true;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
	} else {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
	}

	if (retval != ERROR_OK)
		return retval;

	/* Step 1.e - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 1.f - read DBGDTRTX and discard the value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	count--;
	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	if (count) {
		/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
		 * increments X0 by 4. */
		retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
				armv8->debug_base + CPUV8_DBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Step 3.a - set DTR access mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	target_buffer_set_u32(target, buffer + count * 4, value);
	return retval;
}

static int aarch64_read_cpu_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
			address, size, count);

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	if (size == 4 && (address % 4) == 0)
		retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);

	if (dscr & DSCR_MA) {
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval != ERROR_OK)
		return retval;

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}

static int aarch64_read_phys_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	int retval = ERROR_COMMAND_SYNTAX_ERROR;

	if (count && buffer) {
		/* read memory through APB-AP */
		retval = aarch64_mmu_modify(target, 0);
		if (retval != ERROR_OK)
			return retval;
		retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
	}
	return retval;
}

static int aarch64_read_memory(struct target *target, target_addr_t address,
	uint32_t size, uint32_t count, uint8_t *buffer)
{
	int mmu_enabled = 0;
	int retval;

	/* determine if MMU was enabled on target stop */
	retval = aarch64_mmu(target, &mmu_enabled);
	if (retval != ERROR_OK)
		return retval;

	if (mmu_enabled) {
		/* enable MMU as we could have disabled it for phys access */
		retval = aarch64_mmu_modify(target, 1);
		if (retval != ERROR_OK)
			return retval;
	}
	return aarch64_read_cpu_memory(target, address, size, count, buffer);
}

static int aarch64_write_phys_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	int retval = ERROR_COMMAND_SYNTAX_ERROR;

	if (count && buffer) {
		/* write memory through APB-AP */
		retval = aarch64_mmu_modify(target, 0);
		if (retval != ERROR_OK)
			return retval;
		return aarch64_write_cpu_memory(target, address, size, count, buffer);
	}

	return retval;
}

static int aarch64_write_memory(struct target *target, target_addr_t address,
	uint32_t size, uint32_t count, const uint8_t *buffer)
{
	int mmu_enabled = 0;
	int retval;

	/* determine if MMU was enabled on target stop */
	retval = aarch64_mmu(target, &mmu_enabled);
	if (retval != ERROR_OK)
		return retval;

	if (mmu_enabled) {
		/* enable MMU as we could have disabled it for phys access */
		retval = aarch64_mmu_modify(target, 1);
		if (retval != ERROR_OK)
			return retval;
	}
	return aarch64_write_cpu_memory(target, address, size, count, buffer);
}

static int aarch64_handle_target_request(void *priv)
{
	struct target *target = priv;
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;

	if (!target_was_examined(target))
		return ERROR_OK;
	if (!target->dbg_msg_enabled)
		return ERROR_OK;

	if (target->state == TARGET_RUNNING) {
		uint32_t request;
		uint32_t dscr;
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

		/* check if we have data */
		while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DTRTX, &request);
			if (retval == ERROR_OK) {
				target_request(target, request);
				retval = mem_ap_read_atomic_u32(armv8->debug_ap,
						armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			}
		}
	}

	return ERROR_OK;
}

static int aarch64_examine_first(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct adiv5_dap *swjdp = armv8->arm.dap;
	struct aarch64_private_config *pc = target->private_config;
	int i;
	int retval = ERROR_OK;
	uint64_t debug, ttypr;
	uint32_t cpuid;
	uint32_t tmp0, tmp1, tmp2, tmp3;
	debug = ttypr = cpuid = 0;

	if (!pc)
		return ERROR_FAIL;

	if (!armv8->debug_ap) {
		if (pc->adiv5_config.ap_num == DP_APSEL_INVALID) {
			/* Search for the APB-AP */
			retval = dap_find_get_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
			if (retval != ERROR_OK) {
				LOG_ERROR("Could not find APB-AP for debug access");
				return retval;
			}
		} else {
			armv8->debug_ap = dap_get_ap(swjdp, pc->adiv5_config.ap_num);
			if (!armv8->debug_ap) {
				LOG_ERROR("Cannot get AP");
				return ERROR_FAIL;
			}
		}
	}

	retval = mem_ap_init(armv8->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not initialize the APB-AP");
		return retval;
	}

	armv8->debug_ap->memaccess_tck = 10;

	if (!target->dbgbase_set) {
		/* Lookup Processor DAP */
		retval = dap_lookup_cs_component(armv8->debug_ap, ARM_CS_C9_DEVTYPE_CORE_DEBUG,
				&armv8->debug_base, target->coreid);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT,
				target->coreid, armv8->debug_base);
	} else
		armv8->debug_base = target->dbgbase;

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	retval = mem_ap_read_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "CPUID");
		return retval;
	}

	retval = mem_ap_read_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
	retval += mem_ap_read_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "Memory Model Type");
		return retval;
	}
	retval = mem_ap_read_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
	retval += mem_ap_read_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
		return retval;
	}

	retval = dap_run(armv8->debug_ap->dap);
	if (retval != ERROR_OK) {
		LOG_ERROR("%s: examination failed\n", target_name(target));
		return retval;
	}

	ttypr |= tmp1;
	ttypr = (ttypr << 32) | tmp0;
	debug |= tmp3;
	debug = (debug << 32) | tmp2;

	LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
	LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
	LOG_DEBUG("debug = 0x%08" PRIx64, debug);

	if (!pc->cti) {
		LOG_TARGET_ERROR(target, "CTI not specified");
		return ERROR_FAIL;
	}

	armv8->cti = pc->cti;

	retval = aarch64_dpm_setup(aarch64, debug);
	if (retval != ERROR_OK)
		return retval;

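	/* The 'debug' value assembled above is ID_AA64DFR0_EL1; per the Arm
	 * ARM, BRPs[15:12], WRPs[23:20] and CTX_CMPs[31:28] each hold the
	 * number of implemented comparators minus one, hence the "+ 1"
	 * below. */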
	/* Setup Breakpoint Register Pairs */
	aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
	aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
	aarch64->brp_num_available = aarch64->brp_num;
	aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
	for (i = 0; i < aarch64->brp_num; i++) {
		aarch64->brp_list[i].used = 0;
		if (i < (aarch64->brp_num-aarch64->brp_num_context))
			aarch64->brp_list[i].type = BRP_NORMAL;
		else
			aarch64->brp_list[i].type = BRP_CONTEXT;
		aarch64->brp_list[i].value = 0;
		aarch64->brp_list[i].control = 0;
		aarch64->brp_list[i].brpn = i;
	}

	/* Setup Watchpoint Register Pairs */
	aarch64->wp_num = (uint32_t)((debug >> 20) & 0x0F) + 1;
	aarch64->wp_num_available = aarch64->wp_num;
	aarch64->wp_list = calloc(aarch64->wp_num, sizeof(struct aarch64_brp));
	for (i = 0; i < aarch64->wp_num; i++) {
		aarch64->wp_list[i].used = 0;
		aarch64->wp_list[i].type = BRP_NORMAL;
		aarch64->wp_list[i].value = 0;
		aarch64->wp_list[i].control = 0;
		aarch64->wp_list[i].brpn = i;
	}

	LOG_DEBUG("Configured %i hw breakpoints, %i watchpoints",
		aarch64->brp_num, aarch64->wp_num);

	target->state = TARGET_UNKNOWN;
	target->debug_reason = DBG_REASON_NOTHALTED;
	aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
	target_set_examined(target);
	return ERROR_OK;
}

static int aarch64_examine(struct target *target)
{
	int retval = ERROR_OK;

	/* don't re-probe hardware after each reset */
	if (!target_was_examined(target))
		retval = aarch64_examine_first(target);

	/* Configure core debug access */
	if (retval == ERROR_OK)
		retval = aarch64_init_debug_access(target);

	if (retval == ERROR_OK)
		retval = aarch64_poll(target);

	return retval;
}

/*
 * AArch64 target creation and initialization
 */

static int aarch64_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	arm_semihosting_init(target);
	return ERROR_OK;
}

static int aarch64_init_arch_info(struct target *target,
	struct aarch64_common *aarch64, struct adiv5_dap *dap)
{
	struct armv8_common *armv8 = &aarch64->armv8_common;

	/* Setup struct aarch64_common */
	aarch64->common_magic = AARCH64_COMMON_MAGIC;
	armv8->arm.dap = dap;

	/* register arch-specific functions */
	armv8->examine_debug_reason = NULL;
	armv8->post_debug_entry = aarch64_post_debug_entry;
	armv8->pre_restore_context = NULL;
	armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;

	armv8_init_arch_info(target, armv8);
	target_register_timer_callback(aarch64_handle_target_request, 1,
		TARGET_TIMER_TYPE_PERIODIC, target);

	return ERROR_OK;
}

static int armv8r_target_create(struct target *target, Jim_Interp *interp)
{
	struct aarch64_private_config *pc = target->private_config;
	struct aarch64_common *aarch64;

	if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
		return ERROR_FAIL;

	aarch64 = calloc(1, sizeof(struct aarch64_common));
	if (!aarch64) {
		LOG_ERROR("Out of memory");
		return ERROR_FAIL;
	}

	aarch64->armv8_common.is_armv8r = true;

	return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
}

static int aarch64_target_create(struct target *target, Jim_Interp *interp)
{
	struct aarch64_private_config *pc = target->private_config;
	struct aarch64_common *aarch64;

	if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
		return ERROR_FAIL;

	aarch64 = calloc(1, sizeof(struct aarch64_common));
	if (!aarch64) {
		LOG_ERROR("Out of memory");
		return ERROR_FAIL;
	}

	aarch64->armv8_common.is_armv8r = false;

	return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
}

static void aarch64_deinit_target(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct arm_dpm *dpm = &armv8->dpm;

	if (armv8->debug_ap)
		dap_put_ap(armv8->debug_ap);

	armv8_free_reg_cache(target);
	free(aarch64->brp_list);
	free(dpm->dbp);
	free(dpm->dwp);
	free(target->private_config);
	free(aarch64);
}

static int aarch64_mmu(struct target *target, int *enabled)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}
	if (armv8->is_armv8r)
		*enabled = 0;
	else
		*enabled = armv8->armv8_mmu.mmu_enabled;
	return ERROR_OK;
}

static int aarch64_virt2phys(struct target *target, target_addr_t virt,
	target_addr_t *phys)
{
	return armv8_mmu_translate_va_pa(target, virt, phys, 1);
}

/*
 * private target configuration items
 */
enum aarch64_cfg_param {
	CFG_CTI,
};

static const struct jim_nvp nvp_config_opts[] = {
	{ .name = "-cti", .value = CFG_CTI },
	{ .name = NULL, .value = -1 }
};

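/*
 * Typical Tcl usage of the "-cti" option parsed below (a sketch; the
 * DAP/CTI names and the base address are hypothetical, following the
 * shape of a usual board config):
 *
 *   cti create $_CHIPNAME.cti0 -dap $_CHIPNAME.dap -ap-num 0 \
 *       -baseaddr 0x80220000
 *   target create $_CHIPNAME.cpu0 aarch64 -dap $_CHIPNAME.dap \
 *       -cti $_CHIPNAME.cti0
 */
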
static int aarch64_jim_configure(struct target *target, struct jim_getopt_info *goi)
{
	struct aarch64_private_config *pc;
	struct jim_nvp *n;
	int e;

	pc = (struct aarch64_private_config *)target->private_config;
	if (!pc) {
		pc = calloc(1, sizeof(struct aarch64_private_config));
		pc->adiv5_config.ap_num = DP_APSEL_INVALID;
		target->private_config = pc;
	}

	/*
	 * Call adiv5_jim_configure() to parse the common DAP options
	 * It will return JIM_CONTINUE if it didn't find any known
	 * options, JIM_OK if it correctly parsed the topmost option
	 * and JIM_ERR if an error occurred during parameter evaluation.
	 * For JIM_CONTINUE, we check our own params.
	 *
	 * adiv5_jim_configure() assumes 'private_config' to point to
	 * 'struct adiv5_private_config'. Override 'private_config'!
	 */
	target->private_config = &pc->adiv5_config;
	e = adiv5_jim_configure(target, goi);
	target->private_config = pc;
	if (e != JIM_CONTINUE)
		return e;

	/* parse config or cget options ... */
	if (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);

		/* check first if topmost item is for us */
		e = jim_nvp_name2value_obj(goi->interp, nvp_config_opts,
				goi->argv[0], &n);
		if (e != JIM_OK)
			return JIM_CONTINUE;

		e = jim_getopt_obj(goi, NULL);
		if (e != JIM_OK)
			return e;

		switch (n->value) {
		case CFG_CTI: {
			if (goi->isconfigure) {
				Jim_Obj *o_cti;
				struct arm_cti *cti;
				e = jim_getopt_obj(goi, &o_cti);
				if (e != JIM_OK)
					return e;
				cti = cti_instance_by_jim_obj(goi->interp, o_cti);
				if (!cti) {
					Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
					return JIM_ERR;
				}
				pc->cti = cti;
			} else {
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}

				if (!pc || !pc->cti) {
					Jim_SetResultString(goi->interp, "CTI not configured", -1);
					return JIM_ERR;
				}
				Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
			}
			break;
		}

		default:
			return JIM_CONTINUE;
		}
	}

	return JIM_OK;
}

COMMAND_HANDLER(aarch64_handle_cache_info_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct armv8_common *armv8 = target_to_armv8(target);

	return armv8_handle_cache_info_command(CMD,
			&armv8->armv8_mmu.armv8_cache);
}

COMMAND_HANDLER(aarch64_handle_dbginit_command)
{
	struct target *target = get_current_target(CMD_CTX);
	if (!target_was_examined(target)) {
		LOG_ERROR("target not examined yet");
		return ERROR_FAIL;
	}

	return aarch64_init_debug_access(target);
}

COMMAND_HANDLER(aarch64_handle_disassemble_command)
{
	struct target *target = get_current_target(CMD_CTX);

	if (!target) {
		LOG_ERROR("No target selected");
		return ERROR_FAIL;
	}

	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if (aarch64->common_magic != AARCH64_COMMON_MAGIC) {
		command_print(CMD, "current target isn't an AArch64");
		return ERROR_FAIL;
	}

	int count = 1;
	target_addr_t address;

	switch (CMD_ARGC) {
	case 2:
		COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
		/* FALL THROUGH */
	case 1:
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
		break;
	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	return a64_disassemble(CMD, target, address, count);
}

COMMAND_HANDLER(aarch64_mask_interrupts_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	static const struct nvp nvp_maskisr_modes[] = {
		{ .name = "off", .value = AARCH64_ISRMASK_OFF },
		{ .name = "on", .value = AARCH64_ISRMASK_ON },
		{ .name = NULL, .value = -1 },
	};
	const struct nvp *n;

	if (CMD_ARGC > 0) {
		n = nvp_name2value(nvp_maskisr_modes, CMD_ARGV[0]);
		if (!n->name) {
			LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}

		aarch64->isrmasking_mode = n->value;
	}

	n = nvp_value2name(nvp_maskisr_modes, aarch64->isrmasking_mode);
	command_print(CMD, "aarch64 interrupt mask %s", n->name);

	return ERROR_OK;
}

COMMAND_HANDLER(aarch64_mcrmrc_command)
{
	bool is_mcr = false;
	unsigned int arg_cnt = 5;

	if (!strcmp(CMD_NAME, "mcr")) {
		is_mcr = true;
		arg_cnt = 6;
	}

	if (arg_cnt != CMD_ARGC)
		return ERROR_COMMAND_SYNTAX_ERROR;

	struct target *target = get_current_target(CMD_CTX);
	if (!target) {
		command_print(CMD, "no current target");
		return ERROR_FAIL;
	}
	if (!target_was_examined(target)) {
		command_print(CMD, "%s: not yet examined", target_name(target));
		return ERROR_TARGET_NOT_EXAMINED;
	}

	struct arm *arm = target_to_arm(target);
	if (!is_arm(arm)) {
		command_print(CMD, "%s: not an ARM", target_name(target));
		return ERROR_FAIL;
	}

	if (target->state != TARGET_HALTED) {
		command_print(CMD, "Error: [%s] not halted", target_name(target));
		return ERROR_TARGET_NOT_HALTED;
	}

	if (arm->core_state == ARM_STATE_AARCH64) {
		command_print(CMD, "%s: not 32-bit arm target", target_name(target));
		return ERROR_FAIL;
	}

	int cpnum;
	uint32_t op1;
	uint32_t op2;
	uint32_t crn;
	uint32_t crm;
	uint32_t value;

	/* NOTE: parameter sequence matches ARM instruction set usage:
	 * MCR pNUM, op1, rX, CRn, CRm, op2	; write CP from rX
	 * MRC pNUM, op1, rX, CRn, CRm, op2	; read CP into rX
	 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
	 */
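	/* For example (an illustrative session; valid in AArch32 state only):
	 *   aarch64 mrc 15 0 0 0 0     ; MRC p15, 0, rX, c0, c0, 0 - read MIDR
	 *   aarch64 mcr 15 0 7 5 0 0   ; MCR p15, 0, rX, c7, c5, 0 - ICIALLU
	 */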
	COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], cpnum);
	if (cpnum & ~0xf) {
		command_print(CMD, "coprocessor %d out of range", cpnum);
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], op1);
	if (op1 & ~0x7) {
		command_print(CMD, "op1 %d out of range", op1);
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], crn);
	if (crn & ~0xf) {
		command_print(CMD, "CRn %d out of range", crn);
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], crm);
	if (crm & ~0xf) {
		command_print(CMD, "CRm %d out of range", crm);
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], op2);
	if (op2 & ~0x7) {
		command_print(CMD, "op2 %d out of range", op2);
		return ERROR_COMMAND_ARGUMENT_INVALID;
	}

	if (is_mcr) {
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[5], value);

		/* NOTE: parameters reordered! */
		/* ARMV4_5_MCR(cpnum, op1, 0, crn, crm, op2) */
		int retval = arm->mcr(target, cpnum, op1, op2, crn, crm, value);
		if (retval != ERROR_OK)
			return retval;
	} else {
		value = 0;
		/* NOTE: parameters reordered! */
		/* ARMV4_5_MRC(cpnum, op1, 0, crn, crm, op2) */
		int retval = arm->mrc(target, cpnum, op1, op2, crn, crm, &value);
		if (retval != ERROR_OK)
			return retval;

		command_print(CMD, "0x%" PRIx32, value);
	}

	return ERROR_OK;
}

static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "disassemble",
		.handler = aarch64_handle_disassemble_command,
		.mode = COMMAND_EXEC,
		.help = "Disassemble instructions",
		.usage = "address [count]",
	},
	{
		.name = "maskisr",
		.handler = aarch64_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask aarch64 interrupts during single-step",
		.usage = "['on'|'off']",
	},
	{
		.name = "mcr",
		.mode = COMMAND_EXEC,
		.handler = aarch64_mcrmrc_command,
		.help = "write coprocessor register",
		.usage = "cpnum op1 CRn CRm op2 value",
	},
	{
		.name = "mrc",
		.mode = COMMAND_EXEC,
		.handler = aarch64_mcrmrc_command,
		.help = "read coprocessor register",
		.usage = "cpnum op1 CRn CRm op2",
	},
	{
		.chain = smp_command_handlers,
	},

	COMMAND_REGISTRATION_DONE
};

static const struct command_registration aarch64_command_handlers[] = {
	{
		.name = "arm",
		.mode = COMMAND_ANY,
		.help = "ARM Command Group",
		.usage = "",
		.chain = semihosting_common_handlers
	},
	{
		.chain = armv8_command_handlers,
	},
	{
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "Aarch64 command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};

struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = armv8_get_gdb_arch,
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = aarch64_add_watchpoint,
	.remove_watchpoint = aarch64_remove_watchpoint,
	.hit_watchpoint = aarch64_hit_watchpoint,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.target_jim_configure = aarch64_jim_configure,
	.init_target = aarch64_init_target,
	.deinit_target = aarch64_deinit_target,
	.examine = aarch64_examine,

	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

struct target_type armv8r_target = {
	.name = "armv8r",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = armv8_get_gdb_arch,
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	.read_memory = aarch64_read_phys_memory,
	.write_memory = aarch64_write_phys_memory,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = aarch64_add_watchpoint,
	.remove_watchpoint = aarch64_remove_watchpoint,
	.hit_watchpoint = aarch64_hit_watchpoint,

	.commands = aarch64_command_handlers,
	.target_create = armv8r_target_create,
	.target_jim_configure = aarch64_jim_configure,
	.init_target = aarch64_init_target,
	.deinit_target = aarch64_deinit_target,
	.examine = aarch64_examine,
};