openocd: trivial replace of jim-nvp with new nvp
[openocd.git] / src / target / aarch64.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2015 by David Ung *
5 * *
6 ***************************************************************************/
7
8 #ifdef HAVE_CONFIG_H
9 #include "config.h"
10 #endif
11
12 #include "breakpoints.h"
13 #include "aarch64.h"
14 #include "a64_disassembler.h"
15 #include "register.h"
16 #include "target_request.h"
17 #include "target_type.h"
18 #include "armv8_opcodes.h"
19 #include "armv8_cache.h"
20 #include "arm_coresight.h"
21 #include "arm_semihosting.h"
22 #include "jtag/interface.h"
23 #include "smp.h"
24 #include <helper/nvp.h>
25 #include <helper/time_support.h>
26
/* How to restart a PE leaving debug state: LAZY just triggers the CTI
 * restart event; SYNC additionally waits for PRSR.SDR to confirm the
 * core actually restarted. */
enum restart_mode {
	RESTART_LAZY,
	RESTART_SYNC,
};

/* How to halt a PE: LAZY just triggers the CTI halt event; SYNC
 * additionally waits for PRSR.HALT to confirm debug state was entered. */
enum halt_mode {
	HALT_LAZY,
	HALT_SYNC,
};

/* Per-target configuration from the "target create" options: the
 * standard ADIv5 settings plus the CTI instance wired to this PE. */
struct aarch64_private_config {
	struct adiv5_private_config adiv5_config;
	struct arm_cti *cti;
};
41
42 static int aarch64_poll(struct target *target);
43 static int aarch64_debug_entry(struct target *target);
44 static int aarch64_restore_context(struct target *target, bool bpwp);
45 static int aarch64_set_breakpoint(struct target *target,
46 struct breakpoint *breakpoint, uint8_t matchmode);
47 static int aarch64_set_context_breakpoint(struct target *target,
48 struct breakpoint *breakpoint, uint8_t matchmode);
49 static int aarch64_set_hybrid_breakpoint(struct target *target,
50 struct breakpoint *breakpoint);
51 static int aarch64_unset_breakpoint(struct target *target,
52 struct breakpoint *breakpoint);
53 static int aarch64_mmu(struct target *target, int *enabled);
54 static int aarch64_virt2phys(struct target *target,
55 target_addr_t virt, target_addr_t *phys);
56 static int aarch64_read_cpu_memory(struct target *target,
57 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
58
/*
 * Write the cached SCTLR value back to the target, but only if it differs
 * from the value last written (system_control_reg vs system_control_reg_curr).
 * Uses r0/x0 as scratch via the DPM instruction pipeline, so callers must
 * invoke this before restoring the general-purpose register context.
 */
static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		/* Pick the MSR (AArch64) or MCR (AArch32) opcode matching the
		 * current exception level. EL0 cannot access SCTLR_EL1, so
		 * temporarily switch to EL1h and back afterwards. */
		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
		case ARM_MODE_HYP:
		case ARM_MODE_UND:
		case ARM_MODE_SYS:
			/* AArch32 modes: write SCTLR through CP15 c1,c0,0 */
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
					armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
			return ERROR_FAIL;
		}

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		/* restore the original mode if we had to switch for EL0 */
		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}
118
/* Modify system_control_reg in order to enable or disable mmu for :
 * - virt2phys address conversion
 * - read or write memory in phys or virt address
 * Only the cached "current" copy is updated and written to the target;
 * the saved system_control_reg is untouched so the original MMU state
 * can be restored on resume. */
static int aarch64_mmu_modify(struct target *target, int enable)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval = ERROR_OK;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr = 0;

	if (enable) {
		/* if mmu enabled at target stop and mmu not enable */
		if (!(aarch64->system_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
			return ERROR_FAIL;
		}
		if (!(aarch64->system_control_reg_curr & 0x1U))
			aarch64->system_control_reg_curr |= 0x1U;
	} else {
		if (aarch64->system_control_reg_curr & 0x4U) {
			/* data cache is active; it must be flushed and disabled
			 * before turning the MMU off, or dirty lines are lost */
			aarch64->system_control_reg_curr &= ~0x4U;
			/* flush data cache armv8 function to be called */
			if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
				armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
		}
		if ((aarch64->system_control_reg_curr & 0x1U)) {
			aarch64->system_control_reg_curr &= ~0x1U;
		}
	}

	/* pick the SCTLR write opcode for the current exception level;
	 * EL0 has no SCTLR access, switch to EL1h temporarily */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_UND:
	case ARM_MODE_SYS:
		instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_mode);
		break;
	}
	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
			aarch64->system_control_reg_curr);

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	return retval;
}
193
/*
 * Basic debug access, very low level assumes state is saved
 *
 * Unlocks the OS lock, clears sticky power-down state and programs the
 * static CTI routing used by this driver (channel 0 = halt requests,
 * channel 1 = restart requests, all channels gated from the CTM).
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG("%s", target_name(target));

	/* clear the OS lock so the external debugger can access the core */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = arm_cti_enable(armv8->cti, true);
	/* By default, gate all channel events to and from the CTM */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* output halt requests to PE on channel 0 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	return ERROR_OK;
}
244
245 /* Write to memory mapped registers directly with no cache or mmu handling */
246 static int aarch64_dap_write_memap_register_u32(struct target *target,
247 target_addr_t address,
248 uint32_t value)
249 {
250 int retval;
251 struct armv8_common *armv8 = target_to_armv8(target);
252
253 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
254
255 return retval;
256 }
257
258 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
259 {
260 struct arm_dpm *dpm = &a8->armv8_common.dpm;
261 int retval;
262
263 dpm->arm = &a8->armv8_common.arm;
264 dpm->didr = debug;
265
266 retval = armv8_dpm_setup(dpm);
267 if (retval == ERROR_OK)
268 retval = armv8_dpm_initialize(dpm);
269
270 return retval;
271 }
272
273 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
274 {
275 struct armv8_common *armv8 = target_to_armv8(target);
276 return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
277 }
278
279 static int aarch64_check_state_one(struct target *target,
280 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
281 {
282 struct armv8_common *armv8 = target_to_armv8(target);
283 uint32_t prsr;
284 int retval;
285
286 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
287 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
288 if (retval != ERROR_OK)
289 return retval;
290
291 if (p_prsr)
292 *p_prsr = prsr;
293
294 if (p_result)
295 *p_result = (prsr & mask) == (val & mask);
296
297 return ERROR_OK;
298 }
299
300 static int aarch64_wait_halt_one(struct target *target)
301 {
302 int retval = ERROR_OK;
303 uint32_t prsr;
304
305 int64_t then = timeval_ms();
306 for (;;) {
307 int halted;
308
309 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
310 if (retval != ERROR_OK || halted)
311 break;
312
313 if (timeval_ms() > then + 1000) {
314 retval = ERROR_TARGET_TIMEOUT;
315 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
316 break;
317 }
318 }
319 return retval;
320 }
321
/*
 * Prepare all running PEs of the SMP group for halting: open the CTI
 * gate for channel 0 (halt requests) and enable halting debug mode on
 * each one. With exc_target set, the calling target itself is skipped.
 * On return *p_first is the first prepared target (or the caller's
 * target if none/not excluding), to be used as the halt trigger.
 */
static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("target %s exc %i", target_name(target), exc_target);

	foreach_smp_target(head, target->smp_targets) {
		struct target *curr = head->target;
		struct armv8_common *armv8 = target_to_armv8(curr);

		if (exc_target && curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_RUNNING)
			continue;

		/* HACK: mark this target as prepared for halting */
		curr->debug_reason = DBG_REASON_DBGRQ;

		/* open the gate for channel 0 to let HALT requests pass to the CTM */
		retval = arm_cti_ungate_channel(armv8->cti, 0);
		if (retval == ERROR_OK)
			retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
		if (retval != ERROR_OK)
			break;

		LOG_DEBUG("target %s prepared", target_name(curr));

		if (!first)
			first = curr;
	}

	if (p_first) {
		if (exc_target && first)
			*p_first = first;
		else
			*p_first = target;
	}

	return retval;
}
366
/*
 * Halt a single PE by pulsing CTI channel 0. In HALT_SYNC mode, wait
 * (up to 1 s) for the PE to confirm debug state via PRSR.HALT.
 */
static int aarch64_halt_one(struct target *target, enum halt_mode mode)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG("%s", target_name(target));

	/* allow Halting Debug Mode */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = arm_cti_pulse_channel(armv8->cti, 0);
	if (retval != ERROR_OK)
		return retval;

	if (mode == HALT_SYNC) {
		retval = aarch64_wait_halt_one(target);
		if (retval != ERROR_OK) {
			if (retval == ERROR_TARGET_TIMEOUT)
				LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
			return retval;
		}
	}

	return ERROR_OK;
}
395
/*
 * Halt all PEs of the SMP group. One PE is halted explicitly; the halt
 * event is expected to propagate to the others through the CTM. Then
 * poll all group members (up to 1 s) until every examined PE is halted.
 * With exc_target set, the calling target is excluded from preparation;
 * if it turns out to be the only member, nothing is halted.
 */
static int aarch64_halt_smp(struct target *target, bool exc_target)
{
	struct target *next = target;
	int retval;

	/* prepare halt on all PEs of the group */
	retval = aarch64_prepare_halt_smp(target, exc_target, &next);

	if (exc_target && next == target)
		return retval;

	/* halt the target PE */
	if (retval == ERROR_OK)
		retval = aarch64_halt_one(next, HALT_LAZY);

	if (retval != ERROR_OK)
		return retval;

	/* wait for all PEs to halt */
	int64_t then = timeval_ms();
	for (;;) {
		bool all_halted = true;
		struct target_list *head;
		struct target *curr;

		foreach_smp_target(head, target->smp_targets) {
			int halted;

			curr = head->target;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
			if (retval != ERROR_OK || !halted) {
				all_halted = false;
				break;
			}
		}

		if (all_halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}

		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit halt for the second cluster.
		 */
		/* 'curr' is the first PE found still running in the scan above */
		retval = aarch64_halt_one(curr, HALT_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
458
/*
 * After one SMP member halted, bring the bookkeeping of the whole group
 * up to date: halt any members still running (when the halt was not
 * debugger-requested), then poll every member so its state/debug_reason
 * get refreshed. The target serving GDB is polled last so GDB sees a
 * consistent group state.
 */
static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;

	if (debug_reason == DBG_REASON_NOTHALTED) {
		LOG_DEBUG("Halting remaining targets in SMP group");
		aarch64_halt_smp(target, true);
	}

	/* poll all targets in the group, but skip the target that serves GDB */
	foreach_smp_target(head, target->smp_targets) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* remember the gdb_service->target */
		if (curr->gdb_service)
			gdb_target = curr->gdb_service->target;
		/* skip it */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in aarch64_poll() — with smp temporarily
		 * cleared, the poll below won't call update_halt_gdb() again */
		curr->smp = 0;
		aarch64_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target && gdb_target != target)
		aarch64_poll(gdb_target);

	return ERROR_OK;
}
500
501 /*
502 * Aarch64 Run control
503 */
504
/*
 * Poll the PE state via PRSR. On a transition into debug state, perform
 * full debug entry (save registers, determine debug reason), update the
 * rest of the SMP group, handle semihosting requests and fire the
 * appropriate halted event callback.
 */
static int aarch64_poll(struct target *target)
{
	enum target_state prev_target_state;
	int retval = ERROR_OK;
	int halted;

	retval = aarch64_check_state_one(target,
			PRSR_HALT, PRSR_HALT, &halted, NULL);
	if (retval != ERROR_OK)
		return retval;

	if (halted) {
		prev_target_state = target->state;
		if (prev_target_state != TARGET_HALTED) {
			/* save the reason before debug entry overwrites it */
			enum target_debug_reason debug_reason = target->debug_reason;

			/* We have a halting debug event */
			target->state = TARGET_HALTED;
			LOG_DEBUG("Target %s halted", target_name(target));
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp)
				update_halt_gdb(target, debug_reason);

			/* non-zero means a semihosting call was serviced (or failed) */
			if (arm_semihosting(target, &retval) != 0)
				return retval;

			switch (prev_target_state) {
			case TARGET_RUNNING:
			case TARGET_UNKNOWN:
			case TARGET_RESET:
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
				break;
			case TARGET_DEBUG_RUNNING:
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
				break;
			default:
				break;
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
552
553 static int aarch64_halt(struct target *target)
554 {
555 struct armv8_common *armv8 = target_to_armv8(target);
556 armv8->last_run_control_op = ARMV8_RUNCONTROL_HALT;
557
558 if (target->smp)
559 return aarch64_halt_smp(target, false);
560
561 return aarch64_halt_one(target, HALT_SYNC);
562 }
563
/*
 * Restore a single target for resuming: compute and write back the
 * resume PC (honoring the current/address choice and per-state PC
 * alignment rules), then restore SCTLR and the register context.
 * On return *address holds the actual resume address.
 */
static int aarch64_restore_one(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	LOG_DEBUG("%s", target_name(target));

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		/* A32 instructions are word aligned */
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		/* A64 instructions are word aligned (64-bit PC) */
		resume_pc &= 0xFFFFFFFFFFFFFFFCULL;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	/* mark PC dirty so the context restore writes it back */
	arm->pc->dirty = true;
	arm->pc->valid = true;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
618
/**
 * Prepare a single target for restart: sanity-check DSCR, acknowledge
 * any pending CTI halt event, route restart events (channel 1) to the
 * PE while isolating it from further halt events (channel 0), ensure
 * halting debug mode stays enabled and clear sticky PRSR bits so a
 * later PRSR.SDR read reflects only the upcoming restart.
 */
static int aarch64_prepare_restart_one(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dscr;
	uint32_t tmp;

	LOG_DEBUG("%s", target_name(target));

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* acknowledge a pending CTI halt event */
	retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs. Also close gate for channel 0
	 * to isolate the PE from halt events.
	 */
	if (retval == ERROR_OK)
		retval = arm_cti_ungate_channel(armv8->cti, 1);
	if (retval == ERROR_OK)
		retval = arm_cti_gate_channel(armv8->cti, 0);

	/* make sure that DSCR.HDE is set */
	if (retval == ERROR_OK) {
		dscr |= DSCR_HDE;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval == ERROR_OK) {
		/* clear sticky bits in PRSR, SDR is now 0 */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
	}

	return retval;
}
670
671 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
672 {
673 struct armv8_common *armv8 = target_to_armv8(target);
674 int retval;
675
676 LOG_DEBUG("%s", target_name(target));
677
678 /* trigger an event on channel 1, generates a restart request to the PE */
679 retval = arm_cti_pulse_channel(armv8->cti, 1);
680 if (retval != ERROR_OK)
681 return retval;
682
683 if (mode == RESTART_SYNC) {
684 int64_t then = timeval_ms();
685 for (;;) {
686 int resumed;
687 /*
688 * if PRSR.SDR is set now, the target did restart, even
689 * if it's now already halted again (e.g. due to breakpoint)
690 */
691 retval = aarch64_check_state_one(target,
692 PRSR_SDR, PRSR_SDR, &resumed, NULL);
693 if (retval != ERROR_OK || resumed)
694 break;
695
696 if (timeval_ms() > then + 1000) {
697 LOG_ERROR("%s: Timeout waiting for resume"PRIx32, target_name(target));
698 retval = ERROR_TARGET_TIMEOUT;
699 break;
700 }
701 }
702 }
703
704 if (retval != ERROR_OK)
705 return retval;
706
707 target->debug_reason = DBG_REASON_NOTHALTED;
708 target->state = TARGET_RUNNING;
709
710 return ERROR_OK;
711 }
712
713 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
714 {
715 int retval;
716
717 LOG_DEBUG("%s", target_name(target));
718
719 retval = aarch64_prepare_restart_one(target);
720 if (retval == ERROR_OK)
721 retval = aarch64_do_restart_one(target, mode);
722
723 return retval;
724 }
725
/*
 * prepare all but the current target for restart
 *
 * Each halted group member gets its context restored (resume at current
 * PC, no step) and its CTI gates configured for restart. On return
 * *p_first (optional) is the first successfully prepared member, or
 * NULL if none.
 */
static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;
	uint64_t address;

	foreach_smp_target(head, target->smp_targets) {
		struct target *curr = head->target;

		/* skip calling target */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_HALTED)
			continue;

		/* resume at current address, not in step mode */
		retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
		if (retval == ERROR_OK)
			retval = aarch64_prepare_restart_one(curr);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to restore target %s", target_name(curr));
			break;
		}
		/* remember the first valid target in the group */
		if (!first)
			first = curr;
	}

	if (p_first)
		*p_first = first;

	return retval;
}
765
766
/*
 * Restart all SMP group members except the calling (stepping) target:
 * prepare them, restart the first one (whose CTI channel-1 event should
 * propagate through the CTM), then poll (up to 1 s) until every member
 * reports restarted, explicitly nudging stragglers (see Hi6220 hack).
 */
static int aarch64_step_restart_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prep_restart_smp(target, 0, &first);
	if (retval != ERROR_OK)
		return retval;

	if (first)
		retval = aarch64_do_restart_one(first, RESTART_LAZY);
	if (retval != ERROR_OK) {
		LOG_DEBUG("error restarting target %s", target_name(first));
		return retval;
	}

	int64_t then = timeval_ms();
	for (;;) {
		struct target *curr = target;
		bool all_resumed = true;

		foreach_smp_target(head, target->smp_targets) {
			uint32_t prsr;
			int resumed;

			curr = head->target;

			if (curr == target)
				continue;

			if (!target_was_examined(curr))
				continue;

			/* resumed if PRSR.SDR is set; a PE that is neither
			 * resumed nor halted is still in transition */
			retval = aarch64_check_state_one(curr,
					PRSR_SDR, PRSR_SDR, &resumed, &prsr);
			if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
				all_resumed = false;
				break;
			}

			if (curr->state != TARGET_RUNNING) {
				curr->state = TARGET_RUNNING;
				curr->debug_reason = DBG_REASON_NOTHALTED;
				target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
			}
		}

		if (all_resumed)
			break;

		if (timeval_ms() > then + 1000) {
			LOG_ERROR("%s: timeout waiting for target resume", __func__);
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit resume for the second cluster.
		 */
		/* 'curr' is the member found not resumed in the scan above */
		retval = aarch64_do_restart_one(curr, RESTART_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
839
/*
 * Target resume entry point. For SMP groups, first prepare all other
 * members (restore context, set up CTI gates), then restore and restart
 * this target synchronously; the restart event propagates to the group
 * through the CTM. Finally poll (up to 1 s) until all members resumed,
 * nudging stragglers (see Hi6220 hack), and fire the resume callbacks.
 */
static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	struct armv8_common *armv8 = target_to_armv8(target);
	armv8->last_run_control_op = ARMV8_RUNCONTROL_RESUME;

	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	/*
	 * If this target is part of a SMP group, prepare the others
	 * targets for resuming. This involves restoring the complete
	 * target register context and setting up CTI gates to accept
	 * resume events from the trigger matrix.
	 */
	if (target->smp) {
		retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
		if (retval != ERROR_OK)
			return retval;
	}

	/* all targets prepared, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
			debug_execution);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_SYNC);
	if (retval != ERROR_OK)
		return retval;

	if (target->smp) {
		int64_t then = timeval_ms();
		for (;;) {
			struct target *curr = target;
			struct target_list *head;
			bool all_resumed = true;

			foreach_smp_target(head, target->smp_targets) {
				uint32_t prsr;
				int resumed;

				curr = head->target;
				if (curr == target)
					continue;
				if (!target_was_examined(curr))
					continue;

				/* resumed if PRSR.SDR set; not resumed but halted
				 * means the restart did not reach this PE yet */
				retval = aarch64_check_state_one(curr,
						PRSR_SDR, PRSR_SDR, &resumed, &prsr);
				if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
					all_resumed = false;
					break;
				}

				if (curr->state != TARGET_RUNNING) {
					curr->state = TARGET_RUNNING;
					curr->debug_reason = DBG_REASON_NOTHALTED;
					target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
				}
			}

			if (all_resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}

			/*
			 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
			 * and it looks like the CTI's are not connected by a common
			 * trigger matrix. It seems that we need to halt one core in each
			 * cluster explicitly. So if we find that a core has not halted
			 * yet, we trigger an explicit resume for the second cluster.
			 */
			retval = aarch64_do_restart_one(curr, RESTART_LAZY);
			if (retval != ERROR_OK)
				break;
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}
942
/*
 * Perform debug-state entry after the PE halted: clear sticky errors,
 * read DSCR to determine the core state (AArch32/AArch64) and debug
 * reason, close the CTI gate, capture the watchpoint address (EDWAR)
 * when applicable, then save the register context and run the
 * post_debug_entry hook.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	uint32_t dscr;

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval == ERROR_OK)
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);

	/* select opcode/register-access tables matching the halted state */
	dpm->dscr = dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* close the CTI gate for all events */
	/* NOTE(review): retval is necessarily ERROR_OK here; the check is
	 * redundant but kept for symmetry with the call chain below */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, dscr);

	/* save the memory address that triggered the watchpoint */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;

		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_EDWAR0, &tmp);
		if (retval != ERROR_OK)
			return retval;
		target_addr_t edwar = tmp;

		/* EDWAR[63:32] has unknown content in aarch32 state */
		if (core_state == ARM_STATE_AARCH64) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_EDWAR1, &tmp);
			if (retval != ERROR_OK)
				return retval;
			edwar |= ((target_addr_t)tmp) << 32;
		}

		armv8->dpm.wp_addr = edwar;
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
1011
/*
 * Hook run after debug entry: read SCTLR for the current exception
 * level (via r0/x0 scratch), cache it, identify the cache geometry on
 * first halt, and derive the MMU/D-cache/I-cache enable flags used by
 * the memory access paths.
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	/* pick the SCTLR read opcode for the current exception level;
	 * EL0 has no SCTLR access, switch to EL1h temporarily */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_UND:
	case ARM_MODE_SYS:
		/* AArch32 modes: read SCTLR through CP15 c1,c0,0 */
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
				armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* cache info == -1 means "not yet identified" */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}
	if (armv8->is_armv8r) {
		/* Armv8-R has no MMU (PMSA only) */
		armv8->armv8_mmu.mmu_enabled = 0;
	} else {
		armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	}
	/* SCTLR.C (bit 2) and SCTLR.I (bit 12) */
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
		(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
		(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	return ERROR_OK;
}
1082
1083 /*
1084 * single-step a target
1085 */
1086 static int aarch64_step(struct target *target, int current, target_addr_t address,
1087 int handle_breakpoints)
1088 {
1089 struct armv8_common *armv8 = target_to_armv8(target);
1090 struct aarch64_common *aarch64 = target_to_aarch64(target);
1091 int saved_retval = ERROR_OK;
1092 int retval;
1093 uint32_t edecr;
1094
1095 armv8->last_run_control_op = ARMV8_RUNCONTROL_STEP;
1096
1097 if (target->state != TARGET_HALTED) {
1098 LOG_WARNING("target not halted");
1099 return ERROR_TARGET_NOT_HALTED;
1100 }
1101
1102 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1103 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1104 /* make sure EDECR.SS is not set when restoring the register */
1105
1106 if (retval == ERROR_OK) {
1107 edecr &= ~0x4;
1108 /* set EDECR.SS to enter hardware step mode */
1109 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1110 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1111 }
1112 /* disable interrupts while stepping */
1113 if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
1114 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1115 /* bail out if stepping setup has failed */
1116 if (retval != ERROR_OK)
1117 return retval;
1118
1119 if (target->smp && (current == 1)) {
1120 /*
1121 * isolate current target so that it doesn't get resumed
1122 * together with the others
1123 */
1124 retval = arm_cti_gate_channel(armv8->cti, 1);
1125 /* resume all other targets in the group */
1126 if (retval == ERROR_OK)
1127 retval = aarch64_step_restart_smp(target);
1128 if (retval != ERROR_OK) {
1129 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1130 return retval;
1131 }
1132 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1133 }
1134
1135 /* all other targets running, restore and restart the current target */
1136 retval = aarch64_restore_one(target, current, &address, 0, 0);
1137 if (retval == ERROR_OK)
1138 retval = aarch64_restart_one(target, RESTART_LAZY);
1139
1140 if (retval != ERROR_OK)
1141 return retval;
1142
1143 LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
1144 if (!handle_breakpoints)
1145 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1146
1147 int64_t then = timeval_ms();
1148 for (;;) {
1149 int stepped;
1150 uint32_t prsr;
1151
1152 retval = aarch64_check_state_one(target,
1153 PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
1154 if (retval != ERROR_OK || stepped)
1155 break;
1156
1157 if (timeval_ms() > then + 100) {
1158 LOG_ERROR("timeout waiting for target %s halt after step",
1159 target_name(target));
1160 retval = ERROR_TARGET_TIMEOUT;
1161 break;
1162 }
1163 }
1164
1165 /*
1166 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1167 * causes a timeout. The core takes the step but doesn't complete it and so
1168 * debug state is never entered. However, you can manually halt the core
1169 * as an external debug even is also a WFI wakeup event.
1170 */
1171 if (retval == ERROR_TARGET_TIMEOUT)
1172 saved_retval = aarch64_halt_one(target, HALT_SYNC);
1173
1174 /* restore EDECR */
1175 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1176 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1177 if (retval != ERROR_OK)
1178 return retval;
1179
1180 /* restore interrupts */
1181 if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
1182 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1183 if (retval != ERROR_OK)
1184 return ERROR_OK;
1185 }
1186
1187 if (saved_retval != ERROR_OK)
1188 return saved_retval;
1189
1190 return ERROR_OK;
1191 }
1192
1193 static int aarch64_restore_context(struct target *target, bool bpwp)
1194 {
1195 struct armv8_common *armv8 = target_to_armv8(target);
1196 struct arm *arm = &armv8->arm;
1197
1198 int retval;
1199
1200 LOG_DEBUG("%s", target_name(target));
1201
1202 if (armv8->pre_restore_context)
1203 armv8->pre_restore_context(target);
1204
1205 retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1206 if (retval == ERROR_OK) {
1207 /* registers are now invalid */
1208 register_cache_invalidate(arm->core_cache);
1209 register_cache_invalidate(arm->core_cache->next);
1210 }
1211
1212 return retval;
1213 }
1214
1215 /*
1216 * Cortex-A8 Breakpoint and watchpoint functions
1217 */
1218
1219 /* Setup hardware Breakpoint Register Pair */
1220 static int aarch64_set_breakpoint(struct target *target,
1221 struct breakpoint *breakpoint, uint8_t matchmode)
1222 {
1223 int retval;
1224 int brp_i = 0;
1225 uint32_t control;
1226 uint8_t byte_addr_select = 0x0F;
1227 struct aarch64_common *aarch64 = target_to_aarch64(target);
1228 struct armv8_common *armv8 = &aarch64->armv8_common;
1229 struct aarch64_brp *brp_list = aarch64->brp_list;
1230
1231 if (breakpoint->is_set) {
1232 LOG_WARNING("breakpoint already set");
1233 return ERROR_OK;
1234 }
1235
1236 if (breakpoint->type == BKPT_HARD) {
1237 int64_t bpt_value;
1238 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
1239 brp_i++;
1240 if (brp_i >= aarch64->brp_num) {
1241 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1242 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1243 }
1244 breakpoint_hw_set(breakpoint, brp_i);
1245 if (breakpoint->length == 2)
1246 byte_addr_select = (3 << (breakpoint->address & 0x02));
1247 control = ((matchmode & 0x7) << 20)
1248 | (1 << 13)
1249 | (byte_addr_select << 5)
1250 | (3 << 1) | 1;
1251 brp_list[brp_i].used = 1;
1252 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFCULL;
1253 brp_list[brp_i].control = control;
1254 bpt_value = brp_list[brp_i].value;
1255
1256 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1257 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
1258 (uint32_t)(bpt_value & 0xFFFFFFFF));
1259 if (retval != ERROR_OK)
1260 return retval;
1261 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1262 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
1263 (uint32_t)(bpt_value >> 32));
1264 if (retval != ERROR_OK)
1265 return retval;
1266
1267 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1268 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
1269 brp_list[brp_i].control);
1270 if (retval != ERROR_OK)
1271 return retval;
1272 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1273 brp_list[brp_i].control,
1274 brp_list[brp_i].value);
1275
1276 } else if (breakpoint->type == BKPT_SOFT) {
1277 uint32_t opcode;
1278 uint8_t code[4];
1279
1280 if (armv8_dpm_get_core_state(&armv8->dpm) == ARM_STATE_AARCH64) {
1281 opcode = ARMV8_HLT(11);
1282
1283 if (breakpoint->length != 4)
1284 LOG_ERROR("bug: breakpoint length should be 4 in AArch64 mode");
1285 } else {
1286 /**
1287 * core_state is ARM_STATE_ARM
1288 * in that case the opcode depends on breakpoint length:
1289 * - if length == 4 => A32 opcode
1290 * - if length == 2 => T32 opcode
1291 * - if length == 3 => T32 opcode (refer to gdb doc : ARM-Breakpoint-Kinds)
1292 * in that case the length should be changed from 3 to 4 bytes
1293 **/
1294 opcode = (breakpoint->length == 4) ? ARMV8_HLT_A1(11) :
1295 (uint32_t) (ARMV8_HLT_T1(11) | ARMV8_HLT_T1(11) << 16);
1296
1297 if (breakpoint->length == 3)
1298 breakpoint->length = 4;
1299 }
1300
1301 buf_set_u32(code, 0, 32, opcode);
1302
1303 retval = target_read_memory(target,
1304 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1305 breakpoint->length, 1,
1306 breakpoint->orig_instr);
1307 if (retval != ERROR_OK)
1308 return retval;
1309
1310 armv8_cache_d_inner_flush_virt(armv8,
1311 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1312 breakpoint->length);
1313
1314 retval = target_write_memory(target,
1315 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1316 breakpoint->length, 1, code);
1317 if (retval != ERROR_OK)
1318 return retval;
1319
1320 armv8_cache_d_inner_flush_virt(armv8,
1321 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1322 breakpoint->length);
1323
1324 armv8_cache_i_inner_inval_virt(armv8,
1325 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1326 breakpoint->length);
1327
1328 breakpoint->is_set = true;
1329 }
1330
1331 /* Ensure that halting debug mode is enable */
1332 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1333 if (retval != ERROR_OK) {
1334 LOG_DEBUG("Failed to set DSCR.HDE");
1335 return retval;
1336 }
1337
1338 return ERROR_OK;
1339 }
1340
1341 static int aarch64_set_context_breakpoint(struct target *target,
1342 struct breakpoint *breakpoint, uint8_t matchmode)
1343 {
1344 int retval = ERROR_FAIL;
1345 int brp_i = 0;
1346 uint32_t control;
1347 uint8_t byte_addr_select = 0x0F;
1348 struct aarch64_common *aarch64 = target_to_aarch64(target);
1349 struct armv8_common *armv8 = &aarch64->armv8_common;
1350 struct aarch64_brp *brp_list = aarch64->brp_list;
1351
1352 if (breakpoint->is_set) {
1353 LOG_WARNING("breakpoint already set");
1354 return retval;
1355 }
1356 /*check available context BRPs*/
1357 while ((brp_list[brp_i].used ||
1358 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
1359 brp_i++;
1360
1361 if (brp_i >= aarch64->brp_num) {
1362 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1363 return ERROR_FAIL;
1364 }
1365
1366 breakpoint_hw_set(breakpoint, brp_i);
1367 control = ((matchmode & 0x7) << 20)
1368 | (1 << 13)
1369 | (byte_addr_select << 5)
1370 | (3 << 1) | 1;
1371 brp_list[brp_i].used = 1;
1372 brp_list[brp_i].value = (breakpoint->asid);
1373 brp_list[brp_i].control = control;
1374 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1375 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
1376 brp_list[brp_i].value);
1377 if (retval != ERROR_OK)
1378 return retval;
1379 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1380 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
1381 brp_list[brp_i].control);
1382 if (retval != ERROR_OK)
1383 return retval;
1384 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1385 brp_list[brp_i].control,
1386 brp_list[brp_i].value);
1387 return ERROR_OK;
1388
1389 }
1390
1391 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1392 {
1393 int retval = ERROR_FAIL;
1394 int brp_1 = 0; /* holds the contextID pair */
1395 int brp_2 = 0; /* holds the IVA pair */
1396 uint32_t control_ctx, control_iva;
1397 uint8_t ctx_byte_addr_select = 0x0F;
1398 uint8_t iva_byte_addr_select = 0x0F;
1399 uint8_t ctx_machmode = 0x03;
1400 uint8_t iva_machmode = 0x01;
1401 struct aarch64_common *aarch64 = target_to_aarch64(target);
1402 struct armv8_common *armv8 = &aarch64->armv8_common;
1403 struct aarch64_brp *brp_list = aarch64->brp_list;
1404
1405 if (breakpoint->is_set) {
1406 LOG_WARNING("breakpoint already set");
1407 return retval;
1408 }
1409 /*check available context BRPs*/
1410 while ((brp_list[brp_1].used ||
1411 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1412 brp_1++;
1413
1414 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1415 if (brp_1 >= aarch64->brp_num) {
1416 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1417 return ERROR_FAIL;
1418 }
1419
1420 while ((brp_list[brp_2].used ||
1421 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1422 brp_2++;
1423
1424 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1425 if (brp_2 >= aarch64->brp_num) {
1426 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1427 return ERROR_FAIL;
1428 }
1429
1430 breakpoint_hw_set(breakpoint, brp_1);
1431 breakpoint->linked_brp = brp_2;
1432 control_ctx = ((ctx_machmode & 0x7) << 20)
1433 | (brp_2 << 16)
1434 | (0 << 14)
1435 | (ctx_byte_addr_select << 5)
1436 | (3 << 1) | 1;
1437 brp_list[brp_1].used = 1;
1438 brp_list[brp_1].value = (breakpoint->asid);
1439 brp_list[brp_1].control = control_ctx;
1440 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1441 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].brpn,
1442 brp_list[brp_1].value);
1443 if (retval != ERROR_OK)
1444 return retval;
1445 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1446 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].brpn,
1447 brp_list[brp_1].control);
1448 if (retval != ERROR_OK)
1449 return retval;
1450
1451 control_iva = ((iva_machmode & 0x7) << 20)
1452 | (brp_1 << 16)
1453 | (1 << 13)
1454 | (iva_byte_addr_select << 5)
1455 | (3 << 1) | 1;
1456 brp_list[brp_2].used = 1;
1457 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFCULL;
1458 brp_list[brp_2].control = control_iva;
1459 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1460 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].brpn,
1461 brp_list[brp_2].value & 0xFFFFFFFF);
1462 if (retval != ERROR_OK)
1463 return retval;
1464 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1465 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].brpn,
1466 brp_list[brp_2].value >> 32);
1467 if (retval != ERROR_OK)
1468 return retval;
1469 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1470 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].brpn,
1471 brp_list[brp_2].control);
1472 if (retval != ERROR_OK)
1473 return retval;
1474
1475 return ERROR_OK;
1476 }
1477
/* Remove a previously-set breakpoint.
 *
 * BKPT_HARD with both address and asid non-zero is treated as a hybrid
 * breakpoint: both the context pair and the linked IVA pair are cleared.
 * Plain BKPT_HARD clears a single pair. BKPT_SOFT restores the saved
 * original instruction and maintains cache coherency around the patch.
 * Invalid BRP indices are logged and treated as success (ERROR_OK).
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->is_set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* hybrid breakpoint: address + ASID pair were both programmed */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->number;
			int brp_j = breakpoint->linked_brp;
			if (brp_i >= aarch64->brp_num) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* clear bookkeeping, then write zeroed BCR/BVR to the core */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			/* NOTE(review): the high word is written from the (already zero)
			 * low 32 bits rather than value >> 32 — harmless here because
			 * value was just cleared, but worth confirming intent */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			/* clear the linked IVA pair the same way */
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].brpn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].brpn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].brpn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_brp = 0;
			breakpoint->is_set = false;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: clear the single pair */
			int brp_i = breakpoint->number;
			if (brp_i >= aarch64->brp_num) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->is_set = false;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* keep D-cache and I-cache coherent with the restored memory */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
				breakpoint->length);
	}
	breakpoint->is_set = false;

	return ERROR_OK;
}
1610
1611 static int aarch64_add_breakpoint(struct target *target,
1612 struct breakpoint *breakpoint)
1613 {
1614 struct aarch64_common *aarch64 = target_to_aarch64(target);
1615
1616 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1617 LOG_INFO("no hardware breakpoint available");
1618 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1619 }
1620
1621 if (breakpoint->type == BKPT_HARD)
1622 aarch64->brp_num_available--;
1623
1624 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1625 }
1626
1627 static int aarch64_add_context_breakpoint(struct target *target,
1628 struct breakpoint *breakpoint)
1629 {
1630 struct aarch64_common *aarch64 = target_to_aarch64(target);
1631
1632 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1633 LOG_INFO("no hardware breakpoint available");
1634 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1635 }
1636
1637 if (breakpoint->type == BKPT_HARD)
1638 aarch64->brp_num_available--;
1639
1640 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1641 }
1642
1643 static int aarch64_add_hybrid_breakpoint(struct target *target,
1644 struct breakpoint *breakpoint)
1645 {
1646 struct aarch64_common *aarch64 = target_to_aarch64(target);
1647
1648 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1649 LOG_INFO("no hardware breakpoint available");
1650 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1651 }
1652
1653 if (breakpoint->type == BKPT_HARD)
1654 aarch64->brp_num_available--;
1655
1656 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1657 }
1658
1659 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1660 {
1661 struct aarch64_common *aarch64 = target_to_aarch64(target);
1662
1663 #if 0
1664 /* It is perfectly possible to remove breakpoints while the target is running */
1665 if (target->state != TARGET_HALTED) {
1666 LOG_WARNING("target not halted");
1667 return ERROR_TARGET_NOT_HALTED;
1668 }
1669 #endif
1670
1671 if (breakpoint->is_set) {
1672 aarch64_unset_breakpoint(target, breakpoint);
1673 if (breakpoint->type == BKPT_HARD)
1674 aarch64->brp_num_available++;
1675 }
1676
1677 return ERROR_OK;
1678 }
1679
1680 /* Setup hardware Watchpoint Register Pair */
1681 static int aarch64_set_watchpoint(struct target *target,
1682 struct watchpoint *watchpoint)
1683 {
1684 int retval;
1685 int wp_i = 0;
1686 uint32_t control, offset, length;
1687 struct aarch64_common *aarch64 = target_to_aarch64(target);
1688 struct armv8_common *armv8 = &aarch64->armv8_common;
1689 struct aarch64_brp *wp_list = aarch64->wp_list;
1690
1691 if (watchpoint->is_set) {
1692 LOG_WARNING("watchpoint already set");
1693 return ERROR_OK;
1694 }
1695
1696 while (wp_list[wp_i].used && (wp_i < aarch64->wp_num))
1697 wp_i++;
1698 if (wp_i >= aarch64->wp_num) {
1699 LOG_ERROR("ERROR Can not find free Watchpoint Register Pair");
1700 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1701 }
1702
1703 control = (1 << 0) /* enable */
1704 | (3 << 1) /* both user and privileged access */
1705 | (1 << 13); /* higher mode control */
1706
1707 switch (watchpoint->rw) {
1708 case WPT_READ:
1709 control |= 1 << 3;
1710 break;
1711 case WPT_WRITE:
1712 control |= 2 << 3;
1713 break;
1714 case WPT_ACCESS:
1715 control |= 3 << 3;
1716 break;
1717 }
1718
1719 /* Match up to 8 bytes. */
1720 offset = watchpoint->address & 7;
1721 length = watchpoint->length;
1722 if (offset + length > sizeof(uint64_t)) {
1723 length = sizeof(uint64_t) - offset;
1724 LOG_WARNING("Adjust watchpoint match inside 8-byte boundary");
1725 }
1726 for (; length > 0; offset++, length--)
1727 control |= (1 << offset) << 5;
1728
1729 wp_list[wp_i].value = watchpoint->address & 0xFFFFFFFFFFFFFFF8ULL;
1730 wp_list[wp_i].control = control;
1731
1732 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1733 + CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].brpn,
1734 (uint32_t)(wp_list[wp_i].value & 0xFFFFFFFF));
1735 if (retval != ERROR_OK)
1736 return retval;
1737 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1738 + CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].brpn,
1739 (uint32_t)(wp_list[wp_i].value >> 32));
1740 if (retval != ERROR_OK)
1741 return retval;
1742
1743 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1744 + CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].brpn,
1745 control);
1746 if (retval != ERROR_OK)
1747 return retval;
1748 LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, wp_i,
1749 wp_list[wp_i].control, wp_list[wp_i].value);
1750
1751 /* Ensure that halting debug mode is enable */
1752 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1753 if (retval != ERROR_OK) {
1754 LOG_DEBUG("Failed to set DSCR.HDE");
1755 return retval;
1756 }
1757
1758 wp_list[wp_i].used = 1;
1759 watchpoint_set(watchpoint, wp_i);
1760
1761 return ERROR_OK;
1762 }
1763
1764 /* Clear hardware Watchpoint Register Pair */
1765 static int aarch64_unset_watchpoint(struct target *target,
1766 struct watchpoint *watchpoint)
1767 {
1768 int retval;
1769 struct aarch64_common *aarch64 = target_to_aarch64(target);
1770 struct armv8_common *armv8 = &aarch64->armv8_common;
1771 struct aarch64_brp *wp_list = aarch64->wp_list;
1772
1773 if (!watchpoint->is_set) {
1774 LOG_WARNING("watchpoint not set");
1775 return ERROR_OK;
1776 }
1777
1778 int wp_i = watchpoint->number;
1779 if (wp_i >= aarch64->wp_num) {
1780 LOG_DEBUG("Invalid WP number in watchpoint");
1781 return ERROR_OK;
1782 }
1783 LOG_DEBUG("rwp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, wp_i,
1784 wp_list[wp_i].control, wp_list[wp_i].value);
1785 wp_list[wp_i].used = 0;
1786 wp_list[wp_i].value = 0;
1787 wp_list[wp_i].control = 0;
1788 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1789 + CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].brpn,
1790 wp_list[wp_i].control);
1791 if (retval != ERROR_OK)
1792 return retval;
1793 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1794 + CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].brpn,
1795 wp_list[wp_i].value);
1796 if (retval != ERROR_OK)
1797 return retval;
1798
1799 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1800 + CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].brpn,
1801 (uint32_t)wp_list[wp_i].value);
1802 if (retval != ERROR_OK)
1803 return retval;
1804 watchpoint->is_set = false;
1805
1806 return ERROR_OK;
1807 }
1808
1809 static int aarch64_add_watchpoint(struct target *target,
1810 struct watchpoint *watchpoint)
1811 {
1812 int retval;
1813 struct aarch64_common *aarch64 = target_to_aarch64(target);
1814
1815 if (aarch64->wp_num_available < 1) {
1816 LOG_INFO("no hardware watchpoint available");
1817 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1818 }
1819
1820 retval = aarch64_set_watchpoint(target, watchpoint);
1821 if (retval == ERROR_OK)
1822 aarch64->wp_num_available--;
1823
1824 return retval;
1825 }
1826
1827 static int aarch64_remove_watchpoint(struct target *target,
1828 struct watchpoint *watchpoint)
1829 {
1830 struct aarch64_common *aarch64 = target_to_aarch64(target);
1831
1832 if (watchpoint->is_set) {
1833 aarch64_unset_watchpoint(target, watchpoint);
1834 aarch64->wp_num_available++;
1835 }
1836
1837 return ERROR_OK;
1838 }
1839
1840 /**
1841 * find out which watchpoint hits
1842 * get exception address and compare the address to watchpoints
1843 */
1844 static int aarch64_hit_watchpoint(struct target *target,
1845 struct watchpoint **hit_watchpoint)
1846 {
1847 if (target->debug_reason != DBG_REASON_WATCHPOINT)
1848 return ERROR_FAIL;
1849
1850 struct armv8_common *armv8 = target_to_armv8(target);
1851
1852 target_addr_t exception_address;
1853 struct watchpoint *wp;
1854
1855 exception_address = armv8->dpm.wp_addr;
1856
1857 if (exception_address == 0xFFFFFFFF)
1858 return ERROR_FAIL;
1859
1860 for (wp = target->watchpoints; wp; wp = wp->next)
1861 if (exception_address >= wp->address && exception_address < (wp->address + wp->length)) {
1862 *hit_watchpoint = wp;
1863 return ERROR_OK;
1864 }
1865
1866 return ERROR_FAIL;
1867 }
1868
1869 /*
1870 * Cortex-A8 Reset functions
1871 */
1872
1873 static int aarch64_enable_reset_catch(struct target *target, bool enable)
1874 {
1875 struct armv8_common *armv8 = target_to_armv8(target);
1876 uint32_t edecr;
1877 int retval;
1878
1879 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1880 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1881 LOG_DEBUG("EDECR = 0x%08" PRIx32 ", enable=%d", edecr, enable);
1882 if (retval != ERROR_OK)
1883 return retval;
1884
1885 if (enable)
1886 edecr |= ECR_RCE;
1887 else
1888 edecr &= ~ECR_RCE;
1889
1890 return mem_ap_write_atomic_u32(armv8->debug_ap,
1891 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1892 }
1893
1894 static int aarch64_clear_reset_catch(struct target *target)
1895 {
1896 struct armv8_common *armv8 = target_to_armv8(target);
1897 uint32_t edesr;
1898 int retval;
1899 bool was_triggered;
1900
1901 /* check if Reset Catch debug event triggered as expected */
1902 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1903 armv8->debug_base + CPUV8_DBG_EDESR, &edesr);
1904 if (retval != ERROR_OK)
1905 return retval;
1906
1907 was_triggered = !!(edesr & ESR_RC);
1908 LOG_DEBUG("Reset Catch debug event %s",
1909 was_triggered ? "triggered" : "NOT triggered!");
1910
1911 if (was_triggered) {
1912 /* clear pending Reset Catch debug event */
1913 edesr &= ~ESR_RC;
1914 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1915 armv8->debug_base + CPUV8_DBG_EDESR, edesr);
1916 if (retval != ERROR_OK)
1917 return retval;
1918 }
1919
1920 return ERROR_OK;
1921 }
1922
/* Assert reset on the target.
 *
 * Preference order: a user-supplied RESET_ASSERT event handler, then SRST
 * via the adapter. When halting after reset is requested and SRST does not
 * gate debug access, the Reset Catch debug event is armed before (or after,
 * depending on RESET_SRST_NO_GATING) asserting SRST so the core halts
 * immediately out of reset. Register caches are invalidated and the target
 * state is set to TARGET_RESET.
 */
static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	enum reset_types reset_config = jtag_get_reset_config();
	int retval;

	LOG_DEBUG(" ");

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (reset_config & RESET_HAS_SRST) {
		bool srst_asserted = false;

		if (target->reset_halt && !(reset_config & RESET_SRST_PULLS_TRST)) {
			if (target_was_examined(target)) {

				if (reset_config & RESET_SRST_NO_GATING) {
					/*
					 * SRST needs to be asserted *before* Reset Catch
					 * debug event can be set up.
					 */
					adapter_assert_reset();
					srst_asserted = true;
				}

				/* make sure to clear all sticky errors */
				/* NOTE(review): return value intentionally ignored here —
				 * best effort before the reset */
				mem_ap_write_atomic_u32(armv8->debug_ap,
						armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);

				/* set up Reset Catch debug event to halt the CPU after reset */
				retval = aarch64_enable_reset_catch(target, true);
				if (retval != ERROR_OK)
					LOG_WARNING("%s: Error enabling Reset Catch debug event; the CPU will not halt immediately after reset!",
							target_name(target));
			} else {
				LOG_WARNING("%s: Target not examined, will not halt immediately after reset!",
						target_name(target));
			}
		}

		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		if (!srst_asserted)
			adapter_assert_reset();
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target)) {
		register_cache_invalidate(armv8->arm.core_cache);
		register_cache_invalidate(armv8->arm.core_cache->next);
	}

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1984
/* Deassert reset and, when reset_halt is requested, make sure the core is
 * (or gets) halted: clear and disable the Reset Catch debug event, then
 * fall back to an explicit halt if the core ran past it. */
static int aarch64_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	adapter_deassert_reset();

	/* an unexamined target has no debug access set up — nothing more to do */
	if (!target_was_examined(target))
		return ERROR_OK;

	retval = aarch64_init_debug_access(target);
	if (retval != ERROR_OK)
		return retval;

	/* refresh target->state from the hardware */
	retval = aarch64_poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		/* clear pending Reset Catch debug event */
		retval = aarch64_clear_reset_catch(target);
		if (retval != ERROR_OK)
			LOG_WARNING("%s: Clearing Reset Catch debug event failed",
					target_name(target));

		/* disable Reset Catch debug event */
		retval = aarch64_enable_reset_catch(target, false);
		if (retval != ERROR_OK)
			LOG_WARNING("%s: Disabling Reset Catch debug event failed",
					target_name(target));

		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
					target_name(target));
			if (target_was_examined(target)) {
				retval = aarch64_halt_one(target, HALT_LAZY);
				if (retval != ERROR_OK)
					return retval;
			} else {
				target->state = TARGET_UNKNOWN;
			}
		}
	}

	return ERROR_OK;
}
2033
/* Write memory through the CPU, one element at a time (slow path).
 *
 * For each element the host pushes the data into DTRRX, has the core move
 * DTRRX into register 1 (MRS in AArch64, MRC p14 in AArch32), then executes
 * a post-indexed store (STRB/STRH/STRW) through the address register.
 * size is the element width in bytes (1, 2 or 4); *dscr is the cached DSCR
 * value, updated in place if memory-access mode had to be switched off.
 */
static int aarch64_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

	/* register 1 is used as the data register below — mark it dirty so
	 * its original value gets restored when leaving debug state */
	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode if necessary */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t data, opcode;

		/* write the data to store into DTRRX */
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* move DTRRX into core register 1 on the target */
		if (arm->core_state == ARM_STATE_AARCH64)
			retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		/* store register 1 to memory with a width-matched opcode */
		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2092
/*
 * Write 'count' 32-bit words to target memory using the DCC memory
 * access mode (EDSCR.MA): each word written to DTRRX is stored by the
 * core and the address auto-advances, so no per-word instruction
 * execution is needed.  Only usable for 4-byte, word-aligned transfers
 * (enforced by the caller).
 *
 * The step numbering follows the caller's reference to DDI0487A.g,
 * chapter J9.1.  Returns ERROR_OK on success; *dscr mirrors EDSCR.
 */
static int aarch64_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;

	/* X1/R1 is clobbered by the memory-mode transfer; mark it dirty so
	 * it is restored when leaving debug state */
	armv8_reg_current(arm, 1)->dirty = true;

	/* Step 1.d - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;


	/* Step 2.a - Do the write */
	retval = mem_ap_write_buf_noincr(armv8->debug_ap,
			buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.a - Switch DTR mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2125
/*
 * Write target memory through the APB-AP debug port while the core is
 * halted.  Loads the destination address into X0/R0, then dispatches to
 * the fast (memory-mode, word transfers) or slow (per-item) path.
 * Sticky abort flags in DSCR are checked afterwards; on an abort the
 * DPM exception handler is invoked and ERROR_FAIL is returned.
 *
 * Algorithm reference: DDI0487A.g, chapter J9.1.
 */
static int aarch64_write_cpu_memory(struct target *target,
	uint64_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* write memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRX */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* fast path requires word size and word alignment */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);

	if (retval != ERROR_OK) {
		/* Unset DTR mode */
		mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2212
2213 static int aarch64_read_cpu_memory_slow(struct target *target,
2214 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2215 {
2216 struct armv8_common *armv8 = target_to_armv8(target);
2217 struct arm_dpm *dpm = &armv8->dpm;
2218 struct arm *arm = &armv8->arm;
2219 int retval;
2220
2221 armv8_reg_current(arm, 1)->dirty = true;
2222
2223 /* change DCC to normal mode (if necessary) */
2224 if (*dscr & DSCR_MA) {
2225 *dscr &= DSCR_MA;
2226 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2227 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2228 if (retval != ERROR_OK)
2229 return retval;
2230 }
2231
2232 while (count) {
2233 uint32_t opcode, data;
2234
2235 if (size == 1)
2236 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
2237 else if (size == 2)
2238 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
2239 else
2240 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
2241 retval = dpm->instr_execute(dpm, opcode);
2242 if (retval != ERROR_OK)
2243 return retval;
2244
2245 if (arm->core_state == ARM_STATE_AARCH64)
2246 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
2247 else
2248 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
2249 if (retval != ERROR_OK)
2250 return retval;
2251
2252 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2253 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
2254 if (retval != ERROR_OK)
2255 return retval;
2256
2257 if (size == 1)
2258 *buffer = (uint8_t)data;
2259 else if (size == 2)
2260 target_buffer_set_u16(target, buffer, (uint16_t)data);
2261 else
2262 target_buffer_set_u32(target, buffer, data);
2263
2264 /* Advance */
2265 buffer += size;
2266 --count;
2267 }
2268
2269 return ERROR_OK;
2270 }
2271
/*
 * Read 'count' 32-bit words from target memory using the DCC memory
 * access mode (EDSCR.MA): each read of DTRTX returns data and re-issues
 * the load with an advanced address.  Only usable for 4-byte,
 * word-aligned transfers (enforced by the caller).
 *
 * Per the referenced procedure, the first DTRTX read after enabling MA
 * mode is discarded and the final word is read after MA mode has been
 * switched off again; the step numbering follows DDI0487A.g, J9.1.
 */
static int aarch64_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t value;

	/* Mark X1 as dirty */
	armv8_reg_current(arm, 1)->dirty = true;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
	} else {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
	}

	if (retval != ERROR_OK)
		return retval;

	/* Step 1.e - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 1.f - read DBGDTRTX and discard the value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	/* the last word is fetched separately below, outside MA mode */
	count--;
	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	if (count) {
		/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
		 * increments X0 by 4. */
		retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
				armv8->debug_base + CPUV8_DBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Step 3.a - set DTR access mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	/* count was decremented above, so this is the last slot */
	target_buffer_set_u32(target, buffer + count * 4, value);
	return retval;
}
2340
/*
 * Read target memory through the APB-AP debug port while the core is
 * halted.  Loads the source address into X0/R0, then dispatches to the
 * fast (memory-mode, word transfers) or slow (per-item) path.  Sticky
 * abort flags in DSCR are checked afterwards; on an abort the DPM
 * exception handler is invoked and ERROR_FAIL is returned.
 *
 * Algorithm reference: DDI0487A.g, chapter J9.1.
 */
static int aarch64_read_cpu_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
			address, size, count);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* fast path requires word size and word alignment */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);

	/* make sure memory-access mode is off again, even after an error
	 * in one of the helpers above */
	if (dscr & DSCR_MA) {
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval != ERROR_OK)
		return retval;

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2431
2432 static int aarch64_read_phys_memory(struct target *target,
2433 target_addr_t address, uint32_t size,
2434 uint32_t count, uint8_t *buffer)
2435 {
2436 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2437
2438 if (count && buffer) {
2439 /* read memory through APB-AP */
2440 retval = aarch64_mmu_modify(target, 0);
2441 if (retval != ERROR_OK)
2442 return retval;
2443 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2444 }
2445 return retval;
2446 }
2447
2448 static int aarch64_read_memory(struct target *target, target_addr_t address,
2449 uint32_t size, uint32_t count, uint8_t *buffer)
2450 {
2451 int mmu_enabled = 0;
2452 int retval;
2453
2454 /* determine if MMU was enabled on target stop */
2455 retval = aarch64_mmu(target, &mmu_enabled);
2456 if (retval != ERROR_OK)
2457 return retval;
2458
2459 if (mmu_enabled) {
2460 /* enable MMU as we could have disabled it for phys access */
2461 retval = aarch64_mmu_modify(target, 1);
2462 if (retval != ERROR_OK)
2463 return retval;
2464 }
2465 return aarch64_read_cpu_memory(target, address, size, count, buffer);
2466 }
2467
2468 static int aarch64_write_phys_memory(struct target *target,
2469 target_addr_t address, uint32_t size,
2470 uint32_t count, const uint8_t *buffer)
2471 {
2472 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2473
2474 if (count && buffer) {
2475 /* write memory through APB-AP */
2476 retval = aarch64_mmu_modify(target, 0);
2477 if (retval != ERROR_OK)
2478 return retval;
2479 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2480 }
2481
2482 return retval;
2483 }
2484
2485 static int aarch64_write_memory(struct target *target, target_addr_t address,
2486 uint32_t size, uint32_t count, const uint8_t *buffer)
2487 {
2488 int mmu_enabled = 0;
2489 int retval;
2490
2491 /* determine if MMU was enabled on target stop */
2492 retval = aarch64_mmu(target, &mmu_enabled);
2493 if (retval != ERROR_OK)
2494 return retval;
2495
2496 if (mmu_enabled) {
2497 /* enable MMU as we could have disabled it for phys access */
2498 retval = aarch64_mmu_modify(target, 1);
2499 if (retval != ERROR_OK)
2500 return retval;
2501 }
2502 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2503 }
2504
2505 static int aarch64_handle_target_request(void *priv)
2506 {
2507 struct target *target = priv;
2508 struct armv8_common *armv8 = target_to_armv8(target);
2509 int retval;
2510
2511 if (!target_was_examined(target))
2512 return ERROR_OK;
2513 if (!target->dbg_msg_enabled)
2514 return ERROR_OK;
2515
2516 if (target->state == TARGET_RUNNING) {
2517 uint32_t request;
2518 uint32_t dscr;
2519 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2520 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2521
2522 /* check if we have data */
2523 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2524 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2525 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2526 if (retval == ERROR_OK) {
2527 target_request(target, request);
2528 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2529 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2530 }
2531 }
2532 }
2533
2534 return ERROR_OK;
2535 }
2536
2537 static int aarch64_examine_first(struct target *target)
2538 {
2539 struct aarch64_common *aarch64 = target_to_aarch64(target);
2540 struct armv8_common *armv8 = &aarch64->armv8_common;
2541 struct adiv5_dap *swjdp = armv8->arm.dap;
2542 struct aarch64_private_config *pc = target->private_config;
2543 int i;
2544 int retval = ERROR_OK;
2545 uint64_t debug, ttypr;
2546 uint32_t cpuid;
2547 uint32_t tmp0, tmp1, tmp2, tmp3;
2548 debug = ttypr = cpuid = 0;
2549
2550 if (!pc)
2551 return ERROR_FAIL;
2552
2553 if (!armv8->debug_ap) {
2554 if (pc->adiv5_config.ap_num == DP_APSEL_INVALID) {
2555 /* Search for the APB-AB */
2556 retval = dap_find_get_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2557 if (retval != ERROR_OK) {
2558 LOG_ERROR("Could not find APB-AP for debug access");
2559 return retval;
2560 }
2561 } else {
2562 armv8->debug_ap = dap_get_ap(swjdp, pc->adiv5_config.ap_num);
2563 if (!armv8->debug_ap) {
2564 LOG_ERROR("Cannot get AP");
2565 return ERROR_FAIL;
2566 }
2567 }
2568 }
2569
2570 retval = mem_ap_init(armv8->debug_ap);
2571 if (retval != ERROR_OK) {
2572 LOG_ERROR("Could not initialize the APB-AP");
2573 return retval;
2574 }
2575
2576 armv8->debug_ap->memaccess_tck = 10;
2577
2578 if (!target->dbgbase_set) {
2579 /* Lookup Processor DAP */
2580 retval = dap_lookup_cs_component(armv8->debug_ap, ARM_CS_C9_DEVTYPE_CORE_DEBUG,
2581 &armv8->debug_base, target->coreid);
2582 if (retval != ERROR_OK)
2583 return retval;
2584 LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT,
2585 target->coreid, armv8->debug_base);
2586 } else
2587 armv8->debug_base = target->dbgbase;
2588
2589 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2590 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2591 if (retval != ERROR_OK) {
2592 LOG_DEBUG("Examine %s failed", "oslock");
2593 return retval;
2594 }
2595
2596 retval = mem_ap_read_u32(armv8->debug_ap,
2597 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2598 if (retval != ERROR_OK) {
2599 LOG_DEBUG("Examine %s failed", "CPUID");
2600 return retval;
2601 }
2602
2603 retval = mem_ap_read_u32(armv8->debug_ap,
2604 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2605 retval += mem_ap_read_u32(armv8->debug_ap,
2606 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2607 if (retval != ERROR_OK) {
2608 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2609 return retval;
2610 }
2611 retval = mem_ap_read_u32(armv8->debug_ap,
2612 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2613 retval += mem_ap_read_u32(armv8->debug_ap,
2614 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2615 if (retval != ERROR_OK) {
2616 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2617 return retval;
2618 }
2619
2620 retval = dap_run(armv8->debug_ap->dap);
2621 if (retval != ERROR_OK) {
2622 LOG_ERROR("%s: examination failed\n", target_name(target));
2623 return retval;
2624 }
2625
2626 ttypr |= tmp1;
2627 ttypr = (ttypr << 32) | tmp0;
2628 debug |= tmp3;
2629 debug = (debug << 32) | tmp2;
2630
2631 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2632 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2633 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2634
2635 if (!pc->cti) {
2636 LOG_TARGET_ERROR(target, "CTI not specified");
2637 return ERROR_FAIL;
2638 }
2639
2640 armv8->cti = pc->cti;
2641
2642 retval = aarch64_dpm_setup(aarch64, debug);
2643 if (retval != ERROR_OK)
2644 return retval;
2645
2646 /* Setup Breakpoint Register Pairs */
2647 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2648 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2649 aarch64->brp_num_available = aarch64->brp_num;
2650 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2651 for (i = 0; i < aarch64->brp_num; i++) {
2652 aarch64->brp_list[i].used = 0;
2653 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2654 aarch64->brp_list[i].type = BRP_NORMAL;
2655 else
2656 aarch64->brp_list[i].type = BRP_CONTEXT;
2657 aarch64->brp_list[i].value = 0;
2658 aarch64->brp_list[i].control = 0;
2659 aarch64->brp_list[i].brpn = i;
2660 }
2661
2662 /* Setup Watchpoint Register Pairs */
2663 aarch64->wp_num = (uint32_t)((debug >> 20) & 0x0F) + 1;
2664 aarch64->wp_num_available = aarch64->wp_num;
2665 aarch64->wp_list = calloc(aarch64->wp_num, sizeof(struct aarch64_brp));
2666 for (i = 0; i < aarch64->wp_num; i++) {
2667 aarch64->wp_list[i].used = 0;
2668 aarch64->wp_list[i].type = BRP_NORMAL;
2669 aarch64->wp_list[i].value = 0;
2670 aarch64->wp_list[i].control = 0;
2671 aarch64->wp_list[i].brpn = i;
2672 }
2673
2674 LOG_DEBUG("Configured %i hw breakpoints, %i watchpoints",
2675 aarch64->brp_num, aarch64->wp_num);
2676
2677 target->state = TARGET_UNKNOWN;
2678 target->debug_reason = DBG_REASON_NOTHALTED;
2679 aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2680 target_set_examined(target);
2681 return ERROR_OK;
2682 }
2683
2684 static int aarch64_examine(struct target *target)
2685 {
2686 int retval = ERROR_OK;
2687
2688 /* don't re-probe hardware after each reset */
2689 if (!target_was_examined(target))
2690 retval = aarch64_examine_first(target);
2691
2692 /* Configure core debug access */
2693 if (retval == ERROR_OK)
2694 retval = aarch64_init_debug_access(target);
2695
2696 return retval;
2697 }
2698
/*
 * AArch64 target creation and initialization
 */
2702
/*
 * Late target-init hook.  Most of the per-target setup happens in
 * aarch64_examine_first(); the only remaining step is wiring up ARM
 * semihosting support.
 */
static int aarch64_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	arm_semihosting_init(target);
	return ERROR_OK;
}
2710
/*
 * Shared constructor tail for the aarch64 and armv8r target types:
 * initialize struct aarch64_common, hook the armv8 callbacks and
 * register the periodic DCC-polling timer callback.
 */
static int aarch64_init_arch_info(struct target *target,
	struct aarch64_common *aarch64, struct adiv5_dap *dap)
{
	struct armv8_common *armv8 = &aarch64->armv8_common;

	/* Setup struct aarch64_common */
	aarch64->common_magic = AARCH64_COMMON_MAGIC;
	armv8->arm.dap = dap;

	/* register arch-specific functions */
	armv8->examine_debug_reason = NULL;
	armv8->post_debug_entry = aarch64_post_debug_entry;
	armv8->pre_restore_context = NULL;
	armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;

	armv8_init_arch_info(target, armv8);
	/* periodic poll for DCC target requests (period 1 — unit defined
	 * by target_register_timer_callback) */
	target_register_timer_callback(aarch64_handle_target_request, 1,
		TARGET_TIMER_TYPE_PERIODIC, target);

	return ERROR_OK;
}
2732
2733 static int armv8r_target_create(struct target *target, Jim_Interp *interp)
2734 {
2735 struct aarch64_private_config *pc = target->private_config;
2736 struct aarch64_common *aarch64;
2737
2738 if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2739 return ERROR_FAIL;
2740
2741 aarch64 = calloc(1, sizeof(struct aarch64_common));
2742 if (!aarch64) {
2743 LOG_ERROR("Out of memory");
2744 return ERROR_FAIL;
2745 }
2746
2747 aarch64->armv8_common.is_armv8r = true;
2748
2749 return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2750 }
2751
2752 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2753 {
2754 struct aarch64_private_config *pc = target->private_config;
2755 struct aarch64_common *aarch64;
2756
2757 if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2758 return ERROR_FAIL;
2759
2760 aarch64 = calloc(1, sizeof(struct aarch64_common));
2761 if (!aarch64) {
2762 LOG_ERROR("Out of memory");
2763 return ERROR_FAIL;
2764 }
2765
2766 aarch64->armv8_common.is_armv8r = false;
2767
2768 return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2769 }
2770
2771 static void aarch64_deinit_target(struct target *target)
2772 {
2773 struct aarch64_common *aarch64 = target_to_aarch64(target);
2774 struct armv8_common *armv8 = &aarch64->armv8_common;
2775 struct arm_dpm *dpm = &armv8->dpm;
2776
2777 if (armv8->debug_ap)
2778 dap_put_ap(armv8->debug_ap);
2779
2780 armv8_free_reg_cache(target);
2781 free(aarch64->brp_list);
2782 free(dpm->dbp);
2783 free(dpm->dwp);
2784 free(target->private_config);
2785 free(aarch64);
2786 }
2787
2788 static int aarch64_mmu(struct target *target, int *enabled)
2789 {
2790 struct aarch64_common *aarch64 = target_to_aarch64(target);
2791 struct armv8_common *armv8 = &aarch64->armv8_common;
2792 if (target->state != TARGET_HALTED) {
2793 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2794 return ERROR_TARGET_INVALID;
2795 }
2796 if (armv8->is_armv8r)
2797 *enabled = 0;
2798 else
2799 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2800 return ERROR_OK;
2801 }
2802
/*
 * Translate a virtual address to a physical address via the armv8 MMU
 * translation helper.  The final argument (1) selects a mode of
 * armv8_mmu_translate_va_pa — TODO confirm its semantics in armv8.c.
 */
static int aarch64_virt2phys(struct target *target, target_addr_t virt,
	target_addr_t *phys)
{
	return armv8_mmu_translate_va_pa(target, virt, phys, 1);
}
2808
/*
 * private target configuration items
 */
enum aarch64_cfg_param {
	CFG_CTI,	/* "-cti <name>": cross-trigger interface instance */
};

/* option table for aarch64_jim_configure(); NULL name terminates */
static const struct jim_nvp nvp_config_opts[] = {
	{ .name = "-cti", .value = CFG_CTI },
	{ .name = NULL, .value = -1 }
};
2820
/*
 * Parse "-cti <name>" (and, via adiv5_jim_configure(), the common DAP
 * options) during target "configure"/"cget".  Returns JIM_OK when the
 * topmost option was consumed, JIM_CONTINUE when it is not ours, and
 * JIM_ERR on a parse error.
 */
static int aarch64_jim_configure(struct target *target, struct jim_getopt_info *goi)
{
	struct aarch64_private_config *pc;
	struct jim_nvp *n;
	int e;

	pc = (struct aarch64_private_config *)target->private_config;
	if (!pc) {
		/* first call: allocate the private config lazily
		 * NOTE(review): calloc result is not checked here — a failed
		 * allocation would be dereferenced below; confirm/fix upstream */
		pc = calloc(1, sizeof(struct aarch64_private_config));
		pc->adiv5_config.ap_num = DP_APSEL_INVALID;
		target->private_config = pc;
	}

	/*
	 * Call adiv5_jim_configure() to parse the common DAP options
	 * It will return JIM_CONTINUE if it didn't find any known
	 * options, JIM_OK if it correctly parsed the topmost option
	 * and JIM_ERR if an error occurred during parameter evaluation.
	 * For JIM_CONTINUE, we check our own params.
	 *
	 * adiv5_jim_configure() assumes 'private_config' to point to
	 * 'struct adiv5_private_config'. Override 'private_config'!
	 */
	target->private_config = &pc->adiv5_config;
	e = adiv5_jim_configure(target, goi);
	target->private_config = pc;
	if (e != JIM_CONTINUE)
		return e;

	/* parse config or cget options ... */
	if (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);

		/* check first if topmost item is for us */
		e = jim_nvp_name2value_obj(goi->interp, nvp_config_opts,
				goi->argv[0], &n);
		if (e != JIM_OK)
			return JIM_CONTINUE;

		/* consume the option name we just matched */
		e = jim_getopt_obj(goi, NULL);
		if (e != JIM_OK)
			return e;

		switch (n->value) {
		case CFG_CTI: {
			if (goi->isconfigure) {
				/* "configure -cti <name>": resolve and store the CTI */
				Jim_Obj *o_cti;
				struct arm_cti *cti;
				e = jim_getopt_obj(goi, &o_cti);
				if (e != JIM_OK)
					return e;
				cti = cti_instance_by_jim_obj(goi->interp, o_cti);
				if (!cti) {
					Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
					return JIM_ERR;
				}
				pc->cti = cti;
			} else {
				/* "cget -cti": report the configured CTI name */
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}

				/* NOTE(review): pc was guaranteed non-NULL above,
				 * so only the pc->cti half of this test can fire */
				if (!pc || !pc->cti) {
					Jim_SetResultString(goi->interp, "CTI not configured", -1);
					return JIM_ERR;
				}
				Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
			}
			break;
		}

		default:
			return JIM_CONTINUE;
		}
	}

	return JIM_OK;
}
2902
2903 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2904 {
2905 struct target *target = get_current_target(CMD_CTX);
2906 struct armv8_common *armv8 = target_to_armv8(target);
2907
2908 return armv8_handle_cache_info_command(CMD,
2909 &armv8->armv8_mmu.armv8_cache);
2910 }
2911
2912 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2913 {
2914 struct target *target = get_current_target(CMD_CTX);
2915 if (!target_was_examined(target)) {
2916 LOG_ERROR("target not examined yet");
2917 return ERROR_FAIL;
2918 }
2919
2920 return aarch64_init_debug_access(target);
2921 }
2922
2923 COMMAND_HANDLER(aarch64_handle_disassemble_command)
2924 {
2925 struct target *target = get_current_target(CMD_CTX);
2926
2927 if (!target) {
2928 LOG_ERROR("No target selected");
2929 return ERROR_FAIL;
2930 }
2931
2932 struct aarch64_common *aarch64 = target_to_aarch64(target);
2933
2934 if (aarch64->common_magic != AARCH64_COMMON_MAGIC) {
2935 command_print(CMD, "current target isn't an AArch64");
2936 return ERROR_FAIL;
2937 }
2938
2939 int count = 1;
2940 target_addr_t address;
2941
2942 switch (CMD_ARGC) {
2943 case 2:
2944 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
2945 /* FALL THROUGH */
2946 case 1:
2947 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
2948 break;
2949 default:
2950 return ERROR_COMMAND_SYNTAX_ERROR;
2951 }
2952
2953 return a64_disassemble(CMD, target, address, count);
2954 }
2955
2956 COMMAND_HANDLER(aarch64_mask_interrupts_command)
2957 {
2958 struct target *target = get_current_target(CMD_CTX);
2959 struct aarch64_common *aarch64 = target_to_aarch64(target);
2960
2961 static const struct nvp nvp_maskisr_modes[] = {
2962 { .name = "off", .value = AARCH64_ISRMASK_OFF },
2963 { .name = "on", .value = AARCH64_ISRMASK_ON },
2964 { .name = NULL, .value = -1 },
2965 };
2966 const struct nvp *n;
2967
2968 if (CMD_ARGC > 0) {
2969 n = nvp_name2value(nvp_maskisr_modes, CMD_ARGV[0]);
2970 if (!n->name) {
2971 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2972 return ERROR_COMMAND_SYNTAX_ERROR;
2973 }
2974
2975 aarch64->isrmasking_mode = n->value;
2976 }
2977
2978 n = nvp_value2name(nvp_maskisr_modes, aarch64->isrmasking_mode);
2979 command_print(CMD, "aarch64 interrupt mask %s", n->name);
2980
2981 return ERROR_OK;
2982 }
2983
2984 COMMAND_HANDLER(aarch64_mcrmrc_command)
2985 {
2986 bool is_mcr = false;
2987 unsigned int arg_cnt = 5;
2988
2989 if (!strcmp(CMD_NAME, "mcr")) {
2990 is_mcr = true;
2991 arg_cnt = 6;
2992 }
2993
2994 if (arg_cnt != CMD_ARGC)
2995 return ERROR_COMMAND_SYNTAX_ERROR;
2996
2997 struct target *target = get_current_target(CMD_CTX);
2998 if (!target) {
2999 command_print(CMD, "no current target");
3000 return ERROR_FAIL;
3001 }
3002 if (!target_was_examined(target)) {
3003 command_print(CMD, "%s: not yet examined", target_name(target));
3004 return ERROR_TARGET_NOT_EXAMINED;
3005 }
3006
3007 struct arm *arm = target_to_arm(target);
3008 if (!is_arm(arm)) {
3009 command_print(CMD, "%s: not an ARM", target_name(target));
3010 return ERROR_FAIL;
3011 }
3012
3013 if (target->state != TARGET_HALTED)
3014 return ERROR_TARGET_NOT_HALTED;
3015
3016 if (arm->core_state == ARM_STATE_AARCH64) {
3017 command_print(CMD, "%s: not 32-bit arm target", target_name(target));
3018 return ERROR_FAIL;
3019 }
3020
3021 int cpnum;
3022 uint32_t op1;
3023 uint32_t op2;
3024 uint32_t crn;
3025 uint32_t crm;
3026 uint32_t value;
3027
3028 /* NOTE: parameter sequence matches ARM instruction set usage:
3029 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
3030 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
3031 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
3032 */
3033 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], cpnum);
3034 if (cpnum & ~0xf) {
3035 command_print(CMD, "coprocessor %d out of range", cpnum);
3036 return ERROR_COMMAND_ARGUMENT_INVALID;
3037 }
3038
3039 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], op1);
3040 if (op1 & ~0x7) {
3041 command_print(CMD, "op1 %d out of range", op1);
3042 return ERROR_COMMAND_ARGUMENT_INVALID;
3043 }
3044
3045 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], crn);
3046 if (crn & ~0xf) {
3047 command_print(CMD, "CRn %d out of range", crn);
3048 return ERROR_COMMAND_ARGUMENT_INVALID;
3049 }
3050
3051 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], crm);
3052 if (crm & ~0xf) {
3053 command_print(CMD, "CRm %d out of range", crm);
3054 return ERROR_COMMAND_ARGUMENT_INVALID;
3055 }
3056
3057 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], op2);
3058 if (op2 & ~0x7) {
3059 command_print(CMD, "op2 %d out of range", op2);
3060 return ERROR_COMMAND_ARGUMENT_INVALID;
3061 }
3062
3063 if (is_mcr) {
3064 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[5], value);
3065
3066 /* NOTE: parameters reordered! */
3067 /* ARMV4_5_MCR(cpnum, op1, 0, crn, crm, op2) */
3068 int retval = arm->mcr(target, cpnum, op1, op2, crn, crm, value);
3069 if (retval != ERROR_OK)
3070 return retval;
3071 } else {
3072 value = 0;
3073 /* NOTE: parameters reordered! */
3074 /* ARMV4_5_MRC(cpnum, op1, 0, crn, crm, op2) */
3075 int retval = arm->mrc(target, cpnum, op1, op2, crn, crm, &value);
3076 if (retval != ERROR_OK)
3077 return retval;
3078
3079 command_print(CMD, "0x%" PRIx32, value);
3080 }
3081
3082 return ERROR_OK;
3083 }
3084
/* "aarch64"-group subcommands; chained into aarch64_command_handlers. */
static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "disassemble",
		.handler = aarch64_handle_disassemble_command,
		.mode = COMMAND_EXEC,
		.help = "Disassemble instructions",
		.usage = "address [count]",
	},
	{
		.name = "maskisr",
		.handler = aarch64_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask aarch64 interrupts during single-step",
		.usage = "['on'|'off']",
	},
	{
		/* "mcr" and "mrc" share one handler; it dispatches on CMD_NAME */
		.name = "mcr",
		.mode = COMMAND_EXEC,
		.handler = aarch64_mcrmrc_command,
		.help = "write coprocessor register",
		.usage = "cpnum op1 CRn CRm op2 value",
	},
	{
		.name = "mrc",
		.mode = COMMAND_EXEC,
		.handler = aarch64_mcrmrc_command,
		.help = "read coprocessor register",
		.usage = "cpnum op1 CRn CRm op2",
	},
	{
		.chain = smp_command_handlers,
	},


	COMMAND_REGISTRATION_DONE
};
3135
/* Top-level command tree for this target type: "arm" (semihosting),
 * the shared armv8 commands, and the "aarch64" group above. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.name = "arm",
		.mode = COMMAND_ANY,
		.help = "ARM Command Group",
		.usage = "",
		.chain = semihosting_common_handlers
	},
	{
		.chain = armv8_command_handlers,
	},
	{
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "Aarch64 command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3156
/*
 * Target driver for ARMv8-A "aarch64" cores.  Memory accesses go through
 * the MMU-aware read/write handlers, with separate physical-memory
 * callbacks and virt2phys translation exposed.
 */
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = armv8_get_gdb_arch,
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	/* Virtual-address accessors; physical counterparts are below. */
	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = aarch64_add_watchpoint,
	.remove_watchpoint = aarch64_remove_watchpoint,
	.hit_watchpoint = aarch64_hit_watchpoint,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.target_jim_configure = aarch64_jim_configure,
	.init_target = aarch64_init_target,
	.deinit_target = aarch64_deinit_target,
	.examine = aarch64_examine,

	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};
3197
/*
 * Target driver for "armv8r" cores.  It reuses the aarch64 handlers but
 * wires .read_memory/.write_memory directly to the physical-memory
 * accessors and registers no .mmu/.virt2phys callbacks — presumably
 * because the R-profile has no virtual-address translation to speak of
 * here (NOTE(review): confirm against armv8r_target_create).
 */
struct target_type armv8r_target = {
	.name = "armv8r",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = armv8_get_gdb_arch,
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	/* Plain memory accesses map straight to the physical handlers. */
	.read_memory = aarch64_read_phys_memory,
	.write_memory = aarch64_write_phys_memory,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = aarch64_add_watchpoint,
	.remove_watchpoint = aarch64_remove_watchpoint,
	.hit_watchpoint = aarch64_hit_watchpoint,

	.commands = aarch64_command_handlers,
	.target_create = armv8r_target_create,	/* differs from aarch64_target */
	.target_jim_configure = aarch64_jim_configure,
	.init_target = aarch64_init_target,
	.deinit_target = aarch64_deinit_target,
	.examine = aarch64_examine,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)