arm_adi_v5: fix SIGSEGV due to failing re-examine
[openocd.git] / src / target / aarch64.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2015 by David Ung *
5 * *
6 ***************************************************************************/
7
8 #ifdef HAVE_CONFIG_H
9 #include "config.h"
10 #endif
11
12 #include "breakpoints.h"
13 #include "aarch64.h"
14 #include "a64_disassembler.h"
15 #include "register.h"
16 #include "target_request.h"
17 #include "target_type.h"
18 #include "armv8_opcodes.h"
19 #include "armv8_cache.h"
20 #include "arm_coresight.h"
21 #include "arm_semihosting.h"
22 #include "jtag/interface.h"
23 #include "smp.h"
24 #include <helper/time_support.h>
25
/* How a restart (resume) request is carried out */
enum restart_mode {
	RESTART_LAZY,	/* pulse the CTI restart channel, don't wait for completion */
	RESTART_SYNC,	/* wait until PRSR.SDR confirms the PE actually restarted */
};

/* How a halt request is carried out */
enum halt_mode {
	HALT_LAZY,	/* pulse the CTI halt channel, don't wait for completion */
	HALT_SYNC,	/* wait until PRSR reports the PE halted */
};

/* Per-target configuration collected at target creation time */
struct aarch64_private_config {
	struct adiv5_private_config adiv5_config;	/* ADIv5 DAP/AP selection */
	struct arm_cti *cti;	/* cross-trigger interface used for halt/restart events */
};
40
41 static int aarch64_poll(struct target *target);
42 static int aarch64_debug_entry(struct target *target);
43 static int aarch64_restore_context(struct target *target, bool bpwp);
44 static int aarch64_set_breakpoint(struct target *target,
45 struct breakpoint *breakpoint, uint8_t matchmode);
46 static int aarch64_set_context_breakpoint(struct target *target,
47 struct breakpoint *breakpoint, uint8_t matchmode);
48 static int aarch64_set_hybrid_breakpoint(struct target *target,
49 struct breakpoint *breakpoint);
50 static int aarch64_unset_breakpoint(struct target *target,
51 struct breakpoint *breakpoint);
52 static int aarch64_mmu(struct target *target, int *enabled);
53 static int aarch64_virt2phys(struct target *target,
54 target_addr_t virt, target_addr_t *phys);
55 static int aarch64_read_cpu_memory(struct target *target,
56 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
57
/*
 * Write the cached SCTLR value back to the core if it differs from the
 * value currently programmed (it may have been changed while halted,
 * e.g. by aarch64_mmu_modify()).  Clobbers r0/x0 through the DPM, so
 * callers must run this before restoring the general register context.
 *
 * Returns ERROR_OK, ERROR_FAIL for an unsupported core mode, or the
 * DPM write error.
 */
static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	/* only write the register back if the cached copy was modified */
	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		/* select the SCTLR write opcode matching the current exception level */
		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			/* SCTLR_EL1 is not accessible at EL0; switch to EL1H first */
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
		case ARM_MODE_HYP:
		case ARM_MODE_UND:
		case ARM_MODE_SYS:
			/* AArch32 modes write CP15 c1 (system control register) */
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
					armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
			return ERROR_FAIL;
		}

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		/* undo the temporary mode switch */
		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}
117
/* modify system_control_reg in order to enable or disable mmu for :
 * - virt2phys address conversion
 * - read or write memory in phys or virt address
 *
 * Only the working copy (system_control_reg_curr) is changed here;
 * aarch64_restore_system_control_reg() restores the original value
 * before resuming.  Clobbers r0/x0 through the DPM. */
static int aarch64_mmu_modify(struct target *target, int enable)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval = ERROR_OK;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr = 0;

	if (enable) {
		/* if mmu enabled at target stop and mmu not enable */
		if (!(aarch64->system_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
			return ERROR_FAIL;
		}
		/* set SCTLR.M in the working copy */
		if (!(aarch64->system_control_reg_curr & 0x1U))
			aarch64->system_control_reg_curr |= 0x1U;
	} else {
		if (aarch64->system_control_reg_curr & 0x4U) {
			/* data cache is active */
			aarch64->system_control_reg_curr &= ~0x4U;	/* clear SCTLR.C */
			/* flush data cache armv8 function to be called */
			if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
				armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
		}
		/* clear SCTLR.M in the working copy */
		if ((aarch64->system_control_reg_curr & 0x1U)) {
			aarch64->system_control_reg_curr &= ~0x1U;
		}
	}

	/* select the SCTLR write opcode matching the current exception level */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* SCTLR_EL1 is not accessible at EL0; switch to EL1H first */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_UND:
	case ARM_MODE_SYS:
		/* AArch32 modes write CP15 c1 (system control register) */
		instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_mode);
		break;
	}
	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
			aarch64->system_control_reg_curr);

	/* undo the temporary mode switch */
	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	return retval;
}
192
/*
 * Basic debug access, very low level assumes state is saved.
 *
 * Unlocks the OS Lock, clears the sticky power-down status and applies
 * the static CTI routing used by this driver (channel 0 = halt,
 * channel 1 = restart, all gates closed by default).
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG("%s", target_name(target));

	/* clear the OS Lock so the external debugger can access the core */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = arm_cti_enable(armv8->cti, true);
	/* By default, gate all channel events to and from the CTM */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* output halt requests to PE on channel 0 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	return ERROR_OK;
}
243
244 /* Write to memory mapped registers directly with no cache or mmu handling */
245 static int aarch64_dap_write_memap_register_u32(struct target *target,
246 target_addr_t address,
247 uint32_t value)
248 {
249 int retval;
250 struct armv8_common *armv8 = target_to_armv8(target);
251
252 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
253
254 return retval;
255 }
256
257 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
258 {
259 struct arm_dpm *dpm = &a8->armv8_common.dpm;
260 int retval;
261
262 dpm->arm = &a8->armv8_common.arm;
263 dpm->didr = debug;
264
265 retval = armv8_dpm_setup(dpm);
266 if (retval == ERROR_OK)
267 retval = armv8_dpm_initialize(dpm);
268
269 return retval;
270 }
271
272 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
273 {
274 struct armv8_common *armv8 = target_to_armv8(target);
275 return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
276 }
277
278 static int aarch64_check_state_one(struct target *target,
279 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
280 {
281 struct armv8_common *armv8 = target_to_armv8(target);
282 uint32_t prsr;
283 int retval;
284
285 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
286 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
287 if (retval != ERROR_OK)
288 return retval;
289
290 if (p_prsr)
291 *p_prsr = prsr;
292
293 if (p_result)
294 *p_result = (prsr & mask) == (val & mask);
295
296 return ERROR_OK;
297 }
298
299 static int aarch64_wait_halt_one(struct target *target)
300 {
301 int retval = ERROR_OK;
302 uint32_t prsr;
303
304 int64_t then = timeval_ms();
305 for (;;) {
306 int halted;
307
308 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
309 if (retval != ERROR_OK || halted)
310 break;
311
312 if (timeval_ms() > then + 1000) {
313 retval = ERROR_TARGET_TIMEOUT;
314 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
315 break;
316 }
317 }
318 return retval;
319 }
320
/*
 * Prepare every running, examined PE of the SMP group for halting:
 * open its CTI gate for channel 0 (so a halt event on the CTM reaches
 * it) and enable Halting Debug Mode.  With @exc_target set, the
 * calling target itself is skipped.
 *
 * On return *p_first (if non-NULL) is the first target prepared, or
 * the calling target when none was (or it was excluded and none found).
 */
static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("target %s exc %i", target_name(target), exc_target);

	foreach_smp_target(head, target->smp_targets) {
		struct target *curr = head->target;
		struct armv8_common *armv8 = target_to_armv8(curr);

		/* optionally skip the target that initiated the halt */
		if (exc_target && curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_RUNNING)
			continue;

		/* HACK: mark this target as prepared for halting */
		curr->debug_reason = DBG_REASON_DBGRQ;

		/* open the gate for channel 0 to let HALT requests pass to the CTM */
		retval = arm_cti_ungate_channel(armv8->cti, 0);
		if (retval == ERROR_OK)
			retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
		if (retval != ERROR_OK)
			break;

		LOG_DEBUG("target %s prepared", target_name(curr));

		if (!first)
			first = curr;
	}

	if (p_first) {
		if (exc_target && first)
			*p_first = first;
		else
			*p_first = target;
	}

	return retval;
}
365
366 static int aarch64_halt_one(struct target *target, enum halt_mode mode)
367 {
368 int retval = ERROR_OK;
369 struct armv8_common *armv8 = target_to_armv8(target);
370
371 LOG_DEBUG("%s", target_name(target));
372
373 /* allow Halting Debug Mode */
374 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
375 if (retval != ERROR_OK)
376 return retval;
377
378 /* trigger an event on channel 0, this outputs a halt request to the PE */
379 retval = arm_cti_pulse_channel(armv8->cti, 0);
380 if (retval != ERROR_OK)
381 return retval;
382
383 if (mode == HALT_SYNC) {
384 retval = aarch64_wait_halt_one(target);
385 if (retval != ERROR_OK) {
386 if (retval == ERROR_TARGET_TIMEOUT)
387 LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
388 return retval;
389 }
390 }
391
392 return ERROR_OK;
393 }
394
/*
 * Halt all PEs of the SMP group.  With @exc_target set, the calling
 * target is assumed to be halting on its own and only the others are
 * stopped.  Halting one prepared PE propagates to the rest via the
 * CTM; a one-second poll loop then waits for every examined member to
 * report halted.
 */
static int aarch64_halt_smp(struct target *target, bool exc_target)
{
	struct target *next = target;
	int retval;

	/* prepare halt on all PEs of the group */
	retval = aarch64_prepare_halt_smp(target, exc_target, &next);

	/* no other target needed halting; nothing more to do */
	if (exc_target && next == target)
		return retval;

	/* halt the target PE */
	if (retval == ERROR_OK)
		retval = aarch64_halt_one(next, HALT_LAZY);

	if (retval != ERROR_OK)
		return retval;

	/* wait for all PEs to halt */
	int64_t then = timeval_ms();
	for (;;) {
		bool all_halted = true;
		struct target_list *head;
		struct target *curr;

		foreach_smp_target(head, target->smp_targets) {
			int halted;

			curr = head->target;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
			if (retval != ERROR_OK || !halted) {
				all_halted = false;
				break;
			}
		}

		if (all_halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}

		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit halt for the second cluster.
		 */
		retval = aarch64_halt_one(curr, HALT_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
457
/*
 * After one member of the SMP group halted, bring the rest of the
 * group's book-keeping up to date: halt any still-running members,
 * then poll each one so its state and debug reason get refreshed.
 * The target serving the GDB connection is polled last, after the
 * rest of the group has settled.
 */
static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;

	if (debug_reason == DBG_REASON_NOTHALTED) {
		LOG_DEBUG("Halting remaining targets in SMP group");
		aarch64_halt_smp(target, true);
	}

	/* poll all targets in the group, but skip the target that serves GDB */
	foreach_smp_target(head, target->smp_targets) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* remember the gdb_service->target */
		if (curr->gdb_service)
			gdb_target = curr->gdb_service->target;
		/* skip it */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in aarch64_poll() */
		curr->smp = 0;
		aarch64_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target && gdb_target != target)
		aarch64_poll(gdb_target);

	return ERROR_OK;
}
499
500 /*
501 * Aarch64 Run control
502 */
503
/*
 * Poll PRSR to detect a halt.  On a running-to-halted transition,
 * perform debug entry, update the rest of the SMP group, give
 * semihosting a chance to handle the event, and fire the matching
 * HALTED/DEBUG_HALTED event callbacks.
 */
static int aarch64_poll(struct target *target)
{
	enum target_state prev_target_state;
	int retval = ERROR_OK;
	int halted;

	retval = aarch64_check_state_one(target,
				PRSR_HALT, PRSR_HALT, &halted, NULL);
	if (retval != ERROR_OK)
		return retval;

	if (halted) {
		prev_target_state = target->state;
		if (prev_target_state != TARGET_HALTED) {
			/* capture the reason before debug entry updates it */
			enum target_debug_reason debug_reason = target->debug_reason;

			/* We have a halting debug event */
			target->state = TARGET_HALTED;
			LOG_DEBUG("Target %s halted", target_name(target));
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp)
				update_halt_gdb(target, debug_reason);

			/* non-zero means the halt was consumed as a semihosting event;
			 * propagate its status instead of reporting a halt */
			if (arm_semihosting(target, &retval) != 0)
				return retval;

			switch (prev_target_state) {
			case TARGET_RUNNING:
			case TARGET_UNKNOWN:
			case TARGET_RESET:
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
				break;
			case TARGET_DEBUG_RUNNING:
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
				break;
			default:
				break;
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
551
552 static int aarch64_halt(struct target *target)
553 {
554 struct armv8_common *armv8 = target_to_armv8(target);
555 armv8->last_run_control_op = ARMV8_RUNCONTROL_HALT;
556
557 if (target->smp)
558 return aarch64_halt_smp(target, false);
559
560 return aarch64_halt_one(target, HALT_SYNC);
561 }
562
/*
 * Restore a single target in preparation for resume: determine the
 * resume PC (current PC when @current is set, otherwise *@address),
 * adjust it for the core state, write it back, then restore SCTLR and
 * the full register context.  *@address returns the PC actually used.
 */
static int aarch64_restore_one(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	LOG_DEBUG("%s", target_name(target));

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	/* mark PC dirty so the context restore writes it to the core */
	arm->pc->dirty = true;
	arm->pc->valid = true;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
617
/**
 * Prepare a single target for restart: sanity-check DSCR, acknowledge
 * any pending CTI halt event, route restart (channel 1) events to the
 * PE while gating halt (channel 0) events, force DSCR.HDE on, and
 * clear the sticky PRSR bits so a later SDR read reflects only the
 * upcoming restart.
 *
 * Does not actually restart the PE; see aarch64_do_restart_one().
 */
static int aarch64_prepare_restart_one(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dscr;
	uint32_t tmp;

	LOG_DEBUG("%s", target_name(target));

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* acknowledge a pending CTI halt event */
	retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs. Also close gate for channel 0
	 * to isolate the PE from halt events.
	 */
	if (retval == ERROR_OK)
		retval = arm_cti_ungate_channel(armv8->cti, 1);
	if (retval == ERROR_OK)
		retval = arm_cti_gate_channel(armv8->cti, 0);

	/* make sure that DSCR.HDE is set */
	if (retval == ERROR_OK) {
		dscr |= DSCR_HDE;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval == ERROR_OK) {
		/* clear sticky bits in PRSR, SDR is now 0 */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
	}

	return retval;
}
669
670 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
671 {
672 struct armv8_common *armv8 = target_to_armv8(target);
673 int retval;
674
675 LOG_DEBUG("%s", target_name(target));
676
677 /* trigger an event on channel 1, generates a restart request to the PE */
678 retval = arm_cti_pulse_channel(armv8->cti, 1);
679 if (retval != ERROR_OK)
680 return retval;
681
682 if (mode == RESTART_SYNC) {
683 int64_t then = timeval_ms();
684 for (;;) {
685 int resumed;
686 /*
687 * if PRSR.SDR is set now, the target did restart, even
688 * if it's now already halted again (e.g. due to breakpoint)
689 */
690 retval = aarch64_check_state_one(target,
691 PRSR_SDR, PRSR_SDR, &resumed, NULL);
692 if (retval != ERROR_OK || resumed)
693 break;
694
695 if (timeval_ms() > then + 1000) {
696 LOG_ERROR("%s: Timeout waiting for resume"PRIx32, target_name(target));
697 retval = ERROR_TARGET_TIMEOUT;
698 break;
699 }
700 }
701 }
702
703 if (retval != ERROR_OK)
704 return retval;
705
706 target->debug_reason = DBG_REASON_NOTHALTED;
707 target->state = TARGET_RUNNING;
708
709 return ERROR_OK;
710 }
711
712 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
713 {
714 int retval;
715
716 LOG_DEBUG("%s", target_name(target));
717
718 retval = aarch64_prepare_restart_one(target);
719 if (retval == ERROR_OK)
720 retval = aarch64_do_restart_one(target, mode);
721
722 return retval;
723 }
724
/*
 * prepare all but the current target for restart: restore each halted,
 * examined member's context at its current PC and set up its CTI for
 * the group restart.  *p_first (if non-NULL) receives the first target
 * prepared, or NULL if none was.
 */
static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;
	uint64_t address;	/* scratch: receives each member's resume PC */

	foreach_smp_target(head, target->smp_targets) {
		struct target *curr = head->target;

		/* skip calling target */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_HALTED)
			continue;

		/* resume at current address, not in step mode */
		retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
		if (retval == ERROR_OK)
			retval = aarch64_prepare_restart_one(curr);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to restore target %s", target_name(curr));
			break;
		}
		/* remember the first valid target in the group */
		if (!first)
			first = curr;
	}

	if (p_first)
		*p_first = first;

	return retval;
}
764
765
/*
 * Restart all other members of the SMP group before single-stepping
 * the current target.  Restarting the first prepared member lets the
 * resume event propagate through the CTM; a one-second poll loop then
 * waits until every member resumed (with an explicit per-core retry
 * for clusters whose CTIs are not interconnected, see below).
 */
static int aarch64_step_restart_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prep_restart_smp(target, 0, &first);
	if (retval != ERROR_OK)
		return retval;

	if (first)
		retval = aarch64_do_restart_one(first, RESTART_LAZY);
	if (retval != ERROR_OK) {
		LOG_DEBUG("error restarting target %s", target_name(first));
		return retval;
	}

	int64_t then = timeval_ms();
	for (;;) {
		struct target *curr = target;
		bool all_resumed = true;

		foreach_smp_target(head, target->smp_targets) {
			uint32_t prsr;
			int resumed;

			curr = head->target;

			if (curr == target)
				continue;

			if (!target_was_examined(curr))
				continue;

			/* still halted with SDR clear means it has not resumed yet */
			retval = aarch64_check_state_one(curr,
						PRSR_SDR, PRSR_SDR, &resumed, &prsr);
			if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
				all_resumed = false;
				break;
			}

			if (curr->state != TARGET_RUNNING) {
				curr->state = TARGET_RUNNING;
				curr->debug_reason = DBG_REASON_NOTHALTED;
				target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
			}
		}

		if (all_resumed)
			break;

		if (timeval_ms() > then + 1000) {
			LOG_ERROR("%s: timeout waiting for target resume", __func__);
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit resume for the second cluster.
		 */
		retval = aarch64_do_restart_one(curr, RESTART_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
838
/*
 * Target API resume handler.
 *
 * @param current 1: resume at the current PC, 0: resume at @address
 * @param handle_breakpoints passed through to the context restore
 * @param debug_execution when set, resume "quietly": working areas are
 *        kept, state becomes TARGET_DEBUG_RUNNING and DEBUG_RESUMED
 *        events are fired instead of RESUMED
 *
 * For SMP groups all members are restored and CTI-prepared first, then
 * the group is restarted together through the trigger matrix.
 */
static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	struct armv8_common *armv8 = target_to_armv8(target);
	armv8->last_run_control_op = ARMV8_RUNCONTROL_RESUME;

	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	/*
	 * If this target is part of a SMP group, prepare the others
	 * targets for resuming. This involves restoring the complete
	 * target register context and setting up CTI gates to accept
	 * resume events from the trigger matrix.
	 */
	if (target->smp) {
		retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
		if (retval != ERROR_OK)
			return retval;
	}

	/* all targets prepared, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
				 debug_execution);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_SYNC);
	if (retval != ERROR_OK)
		return retval;

	if (target->smp) {
		/* wait up to one second for the whole group to leave debug state */
		int64_t then = timeval_ms();
		for (;;) {
			struct target *curr = target;
			struct target_list *head;
			bool all_resumed = true;

			foreach_smp_target(head, target->smp_targets) {
				uint32_t prsr;
				int resumed;

				curr = head->target;
				if (curr == target)
					continue;
				if (!target_was_examined(curr))
					continue;

				/* still halted with SDR clear means it has not resumed yet */
				retval = aarch64_check_state_one(curr,
							PRSR_SDR, PRSR_SDR, &resumed, &prsr);
				if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
					all_resumed = false;
					break;
				}

				if (curr->state != TARGET_RUNNING) {
					curr->state = TARGET_RUNNING;
					curr->debug_reason = DBG_REASON_NOTHALTED;
					target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
				}
			}

			if (all_resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}

			/*
			 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
			 * and it looks like the CTI's are not connected by a common
			 * trigger matrix. It seems that we need to halt one core in each
			 * cluster explicitly. So if we find that a core has not halted
			 * yet, we trigger an explicit resume for the second cluster.
			 */
			retval = aarch64_do_restart_one(curr, RESTART_LAZY);
			if (retval != ERROR_OK)
				break;
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}
941
/*
 * Common actions on entering debug state: clear sticky errors, latch
 * DSCR, select AArch32/AArch64 opcode and register access, close the
 * CTI gate, determine the debug reason (including the watchpoint
 * address from EDWAR) and read the current register set.  Calls the
 * arch-specific post_debug_entry hook at the end.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	uint32_t dscr;

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval == ERROR_OK)
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);

	dpm->dscr = dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* close the CTI gate for all events */
	/* NOTE: retval is known ERROR_OK here; the checks below are redundant
	 * but kept as-is */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, dscr);

	/* save the memory address that triggered the watchpoint */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;

		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_EDWAR0, &tmp);
		if (retval != ERROR_OK)
			return retval;
		target_addr_t edwar = tmp;

		/* EDWAR[63:32] has unknown content in aarch32 state */
		if (core_state == ARM_STATE_AARCH64) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_EDWAR1, &tmp);
			if (retval != ERROR_OK)
				return retval;
			edwar |= ((target_addr_t)tmp) << 32;
		}

		armv8->dpm.wp_addr = edwar;
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
1010
/*
 * Arch-specific half of debug entry: read SCTLR for the current
 * exception level, cache it, and derive the MMU/cache enable flags.
 * Reads cache geometry and MPIDR once, on the first debug entry.
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	/* select the SCTLR read opcode matching the current exception level */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* SCTLR_EL1 is not accessible at EL0; switch to EL1H first */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_UND:
	case ARM_MODE_SYS:
		/* AArch32 modes read CP15 c1 (system control register) */
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
				armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	/* undo the temporary mode switch */
	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* cache geometry and MPIDR only need to be identified once */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	/* mirror SCTLR.M (bit 0), SCTLR.C (bit 2) and SCTLR.I (bit 12) */
	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
			(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
			(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	return ERROR_OK;
}
1078
1079 /*
1080 * single-step a target
1081 */
1082 static int aarch64_step(struct target *target, int current, target_addr_t address,
1083 int handle_breakpoints)
1084 {
1085 struct armv8_common *armv8 = target_to_armv8(target);
1086 struct aarch64_common *aarch64 = target_to_aarch64(target);
1087 int saved_retval = ERROR_OK;
1088 int retval;
1089 uint32_t edecr;
1090
1091 armv8->last_run_control_op = ARMV8_RUNCONTROL_STEP;
1092
1093 if (target->state != TARGET_HALTED) {
1094 LOG_WARNING("target not halted");
1095 return ERROR_TARGET_NOT_HALTED;
1096 }
1097
1098 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1099 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1100 /* make sure EDECR.SS is not set when restoring the register */
1101
1102 if (retval == ERROR_OK) {
1103 edecr &= ~0x4;
1104 /* set EDECR.SS to enter hardware step mode */
1105 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1106 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1107 }
1108 /* disable interrupts while stepping */
1109 if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
1110 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1111 /* bail out if stepping setup has failed */
1112 if (retval != ERROR_OK)
1113 return retval;
1114
1115 if (target->smp && (current == 1)) {
1116 /*
1117 * isolate current target so that it doesn't get resumed
1118 * together with the others
1119 */
1120 retval = arm_cti_gate_channel(armv8->cti, 1);
1121 /* resume all other targets in the group */
1122 if (retval == ERROR_OK)
1123 retval = aarch64_step_restart_smp(target);
1124 if (retval != ERROR_OK) {
1125 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1126 return retval;
1127 }
1128 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1129 }
1130
1131 /* all other targets running, restore and restart the current target */
1132 retval = aarch64_restore_one(target, current, &address, 0, 0);
1133 if (retval == ERROR_OK)
1134 retval = aarch64_restart_one(target, RESTART_LAZY);
1135
1136 if (retval != ERROR_OK)
1137 return retval;
1138
1139 LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
1140 if (!handle_breakpoints)
1141 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1142
1143 int64_t then = timeval_ms();
1144 for (;;) {
1145 int stepped;
1146 uint32_t prsr;
1147
1148 retval = aarch64_check_state_one(target,
1149 PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
1150 if (retval != ERROR_OK || stepped)
1151 break;
1152
1153 if (timeval_ms() > then + 100) {
1154 LOG_ERROR("timeout waiting for target %s halt after step",
1155 target_name(target));
1156 retval = ERROR_TARGET_TIMEOUT;
1157 break;
1158 }
1159 }
1160
1161 /*
1162 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1163 * causes a timeout. The core takes the step but doesn't complete it and so
1164 * debug state is never entered. However, you can manually halt the core
1165 * as an external debug even is also a WFI wakeup event.
1166 */
1167 if (retval == ERROR_TARGET_TIMEOUT)
1168 saved_retval = aarch64_halt_one(target, HALT_SYNC);
1169
1170 /* restore EDECR */
1171 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1172 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1173 if (retval != ERROR_OK)
1174 return retval;
1175
1176 /* restore interrupts */
1177 if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
1178 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1179 if (retval != ERROR_OK)
1180 return ERROR_OK;
1181 }
1182
1183 if (saved_retval != ERROR_OK)
1184 return saved_retval;
1185
1186 return ERROR_OK;
1187 }
1188
1189 static int aarch64_restore_context(struct target *target, bool bpwp)
1190 {
1191 struct armv8_common *armv8 = target_to_armv8(target);
1192 struct arm *arm = &armv8->arm;
1193
1194 int retval;
1195
1196 LOG_DEBUG("%s", target_name(target));
1197
1198 if (armv8->pre_restore_context)
1199 armv8->pre_restore_context(target);
1200
1201 retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1202 if (retval == ERROR_OK) {
1203 /* registers are now invalid */
1204 register_cache_invalidate(arm->core_cache);
1205 register_cache_invalidate(arm->core_cache->next);
1206 }
1207
1208 return retval;
1209 }
1210
1211 /*
1212 * Cortex-A8 Breakpoint and watchpoint functions
1213 */
1214
1215 /* Setup hardware Breakpoint Register Pair */
1216 static int aarch64_set_breakpoint(struct target *target,
1217 struct breakpoint *breakpoint, uint8_t matchmode)
1218 {
1219 int retval;
1220 int brp_i = 0;
1221 uint32_t control;
1222 uint8_t byte_addr_select = 0x0F;
1223 struct aarch64_common *aarch64 = target_to_aarch64(target);
1224 struct armv8_common *armv8 = &aarch64->armv8_common;
1225 struct aarch64_brp *brp_list = aarch64->brp_list;
1226
1227 if (breakpoint->is_set) {
1228 LOG_WARNING("breakpoint already set");
1229 return ERROR_OK;
1230 }
1231
1232 if (breakpoint->type == BKPT_HARD) {
1233 int64_t bpt_value;
1234 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
1235 brp_i++;
1236 if (brp_i >= aarch64->brp_num) {
1237 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1238 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1239 }
1240 breakpoint_hw_set(breakpoint, brp_i);
1241 if (breakpoint->length == 2)
1242 byte_addr_select = (3 << (breakpoint->address & 0x02));
1243 control = ((matchmode & 0x7) << 20)
1244 | (1 << 13)
1245 | (byte_addr_select << 5)
1246 | (3 << 1) | 1;
1247 brp_list[brp_i].used = 1;
1248 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1249 brp_list[brp_i].control = control;
1250 bpt_value = brp_list[brp_i].value;
1251
1252 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1253 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
1254 (uint32_t)(bpt_value & 0xFFFFFFFF));
1255 if (retval != ERROR_OK)
1256 return retval;
1257 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1258 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
1259 (uint32_t)(bpt_value >> 32));
1260 if (retval != ERROR_OK)
1261 return retval;
1262
1263 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1264 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
1265 brp_list[brp_i].control);
1266 if (retval != ERROR_OK)
1267 return retval;
1268 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1269 brp_list[brp_i].control,
1270 brp_list[brp_i].value);
1271
1272 } else if (breakpoint->type == BKPT_SOFT) {
1273 uint32_t opcode;
1274 uint8_t code[4];
1275
1276 if (armv8_dpm_get_core_state(&armv8->dpm) == ARM_STATE_AARCH64) {
1277 opcode = ARMV8_HLT(11);
1278
1279 if (breakpoint->length != 4)
1280 LOG_ERROR("bug: breakpoint length should be 4 in AArch64 mode");
1281 } else {
1282 /**
1283 * core_state is ARM_STATE_ARM
1284 * in that case the opcode depends on breakpoint length:
1285 * - if length == 4 => A32 opcode
1286 * - if length == 2 => T32 opcode
1287 * - if length == 3 => T32 opcode (refer to gdb doc : ARM-Breakpoint-Kinds)
1288 * in that case the length should be changed from 3 to 4 bytes
1289 **/
1290 opcode = (breakpoint->length == 4) ? ARMV8_HLT_A1(11) :
1291 (uint32_t) (ARMV8_HLT_T1(11) | ARMV8_HLT_T1(11) << 16);
1292
1293 if (breakpoint->length == 3)
1294 breakpoint->length = 4;
1295 }
1296
1297 buf_set_u32(code, 0, 32, opcode);
1298
1299 retval = target_read_memory(target,
1300 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1301 breakpoint->length, 1,
1302 breakpoint->orig_instr);
1303 if (retval != ERROR_OK)
1304 return retval;
1305
1306 armv8_cache_d_inner_flush_virt(armv8,
1307 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1308 breakpoint->length);
1309
1310 retval = target_write_memory(target,
1311 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1312 breakpoint->length, 1, code);
1313 if (retval != ERROR_OK)
1314 return retval;
1315
1316 armv8_cache_d_inner_flush_virt(armv8,
1317 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1318 breakpoint->length);
1319
1320 armv8_cache_i_inner_inval_virt(armv8,
1321 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1322 breakpoint->length);
1323
1324 breakpoint->is_set = true;
1325 }
1326
1327 /* Ensure that halting debug mode is enable */
1328 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1329 if (retval != ERROR_OK) {
1330 LOG_DEBUG("Failed to set DSCR.HDE");
1331 return retval;
1332 }
1333
1334 return ERROR_OK;
1335 }
1336
1337 static int aarch64_set_context_breakpoint(struct target *target,
1338 struct breakpoint *breakpoint, uint8_t matchmode)
1339 {
1340 int retval = ERROR_FAIL;
1341 int brp_i = 0;
1342 uint32_t control;
1343 uint8_t byte_addr_select = 0x0F;
1344 struct aarch64_common *aarch64 = target_to_aarch64(target);
1345 struct armv8_common *armv8 = &aarch64->armv8_common;
1346 struct aarch64_brp *brp_list = aarch64->brp_list;
1347
1348 if (breakpoint->is_set) {
1349 LOG_WARNING("breakpoint already set");
1350 return retval;
1351 }
1352 /*check available context BRPs*/
1353 while ((brp_list[brp_i].used ||
1354 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
1355 brp_i++;
1356
1357 if (brp_i >= aarch64->brp_num) {
1358 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1359 return ERROR_FAIL;
1360 }
1361
1362 breakpoint_hw_set(breakpoint, brp_i);
1363 control = ((matchmode & 0x7) << 20)
1364 | (1 << 13)
1365 | (byte_addr_select << 5)
1366 | (3 << 1) | 1;
1367 brp_list[brp_i].used = 1;
1368 brp_list[brp_i].value = (breakpoint->asid);
1369 brp_list[brp_i].control = control;
1370 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1371 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
1372 brp_list[brp_i].value);
1373 if (retval != ERROR_OK)
1374 return retval;
1375 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1376 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
1377 brp_list[brp_i].control);
1378 if (retval != ERROR_OK)
1379 return retval;
1380 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1381 brp_list[brp_i].control,
1382 brp_list[brp_i].value);
1383 return ERROR_OK;
1384
1385 }
1386
1387 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1388 {
1389 int retval = ERROR_FAIL;
1390 int brp_1 = 0; /* holds the contextID pair */
1391 int brp_2 = 0; /* holds the IVA pair */
1392 uint32_t control_ctx, control_iva;
1393 uint8_t ctx_byte_addr_select = 0x0F;
1394 uint8_t iva_byte_addr_select = 0x0F;
1395 uint8_t ctx_machmode = 0x03;
1396 uint8_t iva_machmode = 0x01;
1397 struct aarch64_common *aarch64 = target_to_aarch64(target);
1398 struct armv8_common *armv8 = &aarch64->armv8_common;
1399 struct aarch64_brp *brp_list = aarch64->brp_list;
1400
1401 if (breakpoint->is_set) {
1402 LOG_WARNING("breakpoint already set");
1403 return retval;
1404 }
1405 /*check available context BRPs*/
1406 while ((brp_list[brp_1].used ||
1407 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1408 brp_1++;
1409
1410 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1411 if (brp_1 >= aarch64->brp_num) {
1412 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1413 return ERROR_FAIL;
1414 }
1415
1416 while ((brp_list[brp_2].used ||
1417 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1418 brp_2++;
1419
1420 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1421 if (brp_2 >= aarch64->brp_num) {
1422 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1423 return ERROR_FAIL;
1424 }
1425
1426 breakpoint_hw_set(breakpoint, brp_1);
1427 breakpoint->linked_brp = brp_2;
1428 control_ctx = ((ctx_machmode & 0x7) << 20)
1429 | (brp_2 << 16)
1430 | (0 << 14)
1431 | (ctx_byte_addr_select << 5)
1432 | (3 << 1) | 1;
1433 brp_list[brp_1].used = 1;
1434 brp_list[brp_1].value = (breakpoint->asid);
1435 brp_list[brp_1].control = control_ctx;
1436 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1437 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].brpn,
1438 brp_list[brp_1].value);
1439 if (retval != ERROR_OK)
1440 return retval;
1441 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1442 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].brpn,
1443 brp_list[brp_1].control);
1444 if (retval != ERROR_OK)
1445 return retval;
1446
1447 control_iva = ((iva_machmode & 0x7) << 20)
1448 | (brp_1 << 16)
1449 | (1 << 13)
1450 | (iva_byte_addr_select << 5)
1451 | (3 << 1) | 1;
1452 brp_list[brp_2].used = 1;
1453 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1454 brp_list[brp_2].control = control_iva;
1455 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1456 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].brpn,
1457 brp_list[brp_2].value & 0xFFFFFFFF);
1458 if (retval != ERROR_OK)
1459 return retval;
1460 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1461 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].brpn,
1462 brp_list[brp_2].value >> 32);
1463 if (retval != ERROR_OK)
1464 return retval;
1465 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1466 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].brpn,
1467 brp_list[brp_2].control);
1468 if (retval != ERROR_OK)
1469 return retval;
1470
1471 return ERROR_OK;
1472 }
1473
/*
 * Remove a previously set breakpoint.
 *
 * Hardware breakpoints: disable the BRP (write BCR first, then clear both
 * halves of BVR) and release the bookkeeping entry; hybrid breakpoints
 * additionally release the linked context BRP. Software breakpoints:
 * write back the saved original instruction and maintain cache coherency.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->is_set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* address and asid both set => hybrid (linked) breakpoint */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->number;
			int brp_j = breakpoint->linked_brp;
			if (brp_i >= aarch64->brp_num) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* clear the bookkeeping entry, then mirror the zeros to hardware */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			/* value is zero here, so the truncated cast still clears the high word */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			/* now release the linked (IVA) register pair */
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].brpn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].brpn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].brpn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_brp = 0;
			breakpoint->is_set = false;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: single register pair */
			int brp_i = breakpoint->number;
			if (brp_i >= aarch64->brp_num) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->is_set = false;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* keep D-cache and I-cache coherent with the restored opcode */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->is_set = false;

	return ERROR_OK;
}
1606
1607 static int aarch64_add_breakpoint(struct target *target,
1608 struct breakpoint *breakpoint)
1609 {
1610 struct aarch64_common *aarch64 = target_to_aarch64(target);
1611
1612 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1613 LOG_INFO("no hardware breakpoint available");
1614 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1615 }
1616
1617 if (breakpoint->type == BKPT_HARD)
1618 aarch64->brp_num_available--;
1619
1620 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1621 }
1622
1623 static int aarch64_add_context_breakpoint(struct target *target,
1624 struct breakpoint *breakpoint)
1625 {
1626 struct aarch64_common *aarch64 = target_to_aarch64(target);
1627
1628 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1629 LOG_INFO("no hardware breakpoint available");
1630 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1631 }
1632
1633 if (breakpoint->type == BKPT_HARD)
1634 aarch64->brp_num_available--;
1635
1636 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1637 }
1638
1639 static int aarch64_add_hybrid_breakpoint(struct target *target,
1640 struct breakpoint *breakpoint)
1641 {
1642 struct aarch64_common *aarch64 = target_to_aarch64(target);
1643
1644 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1645 LOG_INFO("no hardware breakpoint available");
1646 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1647 }
1648
1649 if (breakpoint->type == BKPT_HARD)
1650 aarch64->brp_num_available--;
1651
1652 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1653 }
1654
1655 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1656 {
1657 struct aarch64_common *aarch64 = target_to_aarch64(target);
1658
1659 #if 0
1660 /* It is perfectly possible to remove breakpoints while the target is running */
1661 if (target->state != TARGET_HALTED) {
1662 LOG_WARNING("target not halted");
1663 return ERROR_TARGET_NOT_HALTED;
1664 }
1665 #endif
1666
1667 if (breakpoint->is_set) {
1668 aarch64_unset_breakpoint(target, breakpoint);
1669 if (breakpoint->type == BKPT_HARD)
1670 aarch64->brp_num_available++;
1671 }
1672
1673 return ERROR_OK;
1674 }
1675
1676 /* Setup hardware Watchpoint Register Pair */
1677 static int aarch64_set_watchpoint(struct target *target,
1678 struct watchpoint *watchpoint)
1679 {
1680 int retval;
1681 int wp_i = 0;
1682 uint32_t control, offset, length;
1683 struct aarch64_common *aarch64 = target_to_aarch64(target);
1684 struct armv8_common *armv8 = &aarch64->armv8_common;
1685 struct aarch64_brp *wp_list = aarch64->wp_list;
1686
1687 if (watchpoint->is_set) {
1688 LOG_WARNING("watchpoint already set");
1689 return ERROR_OK;
1690 }
1691
1692 while (wp_list[wp_i].used && (wp_i < aarch64->wp_num))
1693 wp_i++;
1694 if (wp_i >= aarch64->wp_num) {
1695 LOG_ERROR("ERROR Can not find free Watchpoint Register Pair");
1696 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1697 }
1698
1699 control = (1 << 0) /* enable */
1700 | (3 << 1) /* both user and privileged access */
1701 | (1 << 13); /* higher mode control */
1702
1703 switch (watchpoint->rw) {
1704 case WPT_READ:
1705 control |= 1 << 3;
1706 break;
1707 case WPT_WRITE:
1708 control |= 2 << 3;
1709 break;
1710 case WPT_ACCESS:
1711 control |= 3 << 3;
1712 break;
1713 }
1714
1715 /* Match up to 8 bytes. */
1716 offset = watchpoint->address & 7;
1717 length = watchpoint->length;
1718 if (offset + length > sizeof(uint64_t)) {
1719 length = sizeof(uint64_t) - offset;
1720 LOG_WARNING("Adjust watchpoint match inside 8-byte boundary");
1721 }
1722 for (; length > 0; offset++, length--)
1723 control |= (1 << offset) << 5;
1724
1725 wp_list[wp_i].value = watchpoint->address & 0xFFFFFFFFFFFFFFF8ULL;
1726 wp_list[wp_i].control = control;
1727
1728 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1729 + CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].brpn,
1730 (uint32_t)(wp_list[wp_i].value & 0xFFFFFFFF));
1731 if (retval != ERROR_OK)
1732 return retval;
1733 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1734 + CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].brpn,
1735 (uint32_t)(wp_list[wp_i].value >> 32));
1736 if (retval != ERROR_OK)
1737 return retval;
1738
1739 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1740 + CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].brpn,
1741 control);
1742 if (retval != ERROR_OK)
1743 return retval;
1744 LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, wp_i,
1745 wp_list[wp_i].control, wp_list[wp_i].value);
1746
1747 /* Ensure that halting debug mode is enable */
1748 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1749 if (retval != ERROR_OK) {
1750 LOG_DEBUG("Failed to set DSCR.HDE");
1751 return retval;
1752 }
1753
1754 wp_list[wp_i].used = 1;
1755 watchpoint_set(watchpoint, wp_i);
1756
1757 return ERROR_OK;
1758 }
1759
1760 /* Clear hardware Watchpoint Register Pair */
1761 static int aarch64_unset_watchpoint(struct target *target,
1762 struct watchpoint *watchpoint)
1763 {
1764 int retval;
1765 struct aarch64_common *aarch64 = target_to_aarch64(target);
1766 struct armv8_common *armv8 = &aarch64->armv8_common;
1767 struct aarch64_brp *wp_list = aarch64->wp_list;
1768
1769 if (!watchpoint->is_set) {
1770 LOG_WARNING("watchpoint not set");
1771 return ERROR_OK;
1772 }
1773
1774 int wp_i = watchpoint->number;
1775 if (wp_i >= aarch64->wp_num) {
1776 LOG_DEBUG("Invalid WP number in watchpoint");
1777 return ERROR_OK;
1778 }
1779 LOG_DEBUG("rwp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, wp_i,
1780 wp_list[wp_i].control, wp_list[wp_i].value);
1781 wp_list[wp_i].used = 0;
1782 wp_list[wp_i].value = 0;
1783 wp_list[wp_i].control = 0;
1784 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1785 + CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].brpn,
1786 wp_list[wp_i].control);
1787 if (retval != ERROR_OK)
1788 return retval;
1789 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1790 + CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].brpn,
1791 wp_list[wp_i].value);
1792 if (retval != ERROR_OK)
1793 return retval;
1794
1795 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1796 + CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].brpn,
1797 (uint32_t)wp_list[wp_i].value);
1798 if (retval != ERROR_OK)
1799 return retval;
1800 watchpoint->is_set = false;
1801
1802 return ERROR_OK;
1803 }
1804
1805 static int aarch64_add_watchpoint(struct target *target,
1806 struct watchpoint *watchpoint)
1807 {
1808 int retval;
1809 struct aarch64_common *aarch64 = target_to_aarch64(target);
1810
1811 if (aarch64->wp_num_available < 1) {
1812 LOG_INFO("no hardware watchpoint available");
1813 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1814 }
1815
1816 retval = aarch64_set_watchpoint(target, watchpoint);
1817 if (retval == ERROR_OK)
1818 aarch64->wp_num_available--;
1819
1820 return retval;
1821 }
1822
1823 static int aarch64_remove_watchpoint(struct target *target,
1824 struct watchpoint *watchpoint)
1825 {
1826 struct aarch64_common *aarch64 = target_to_aarch64(target);
1827
1828 if (watchpoint->is_set) {
1829 aarch64_unset_watchpoint(target, watchpoint);
1830 aarch64->wp_num_available++;
1831 }
1832
1833 return ERROR_OK;
1834 }
1835
1836 /**
1837 * find out which watchpoint hits
1838 * get exception address and compare the address to watchpoints
1839 */
1840 static int aarch64_hit_watchpoint(struct target *target,
1841 struct watchpoint **hit_watchpoint)
1842 {
1843 if (target->debug_reason != DBG_REASON_WATCHPOINT)
1844 return ERROR_FAIL;
1845
1846 struct armv8_common *armv8 = target_to_armv8(target);
1847
1848 target_addr_t exception_address;
1849 struct watchpoint *wp;
1850
1851 exception_address = armv8->dpm.wp_addr;
1852
1853 if (exception_address == 0xFFFFFFFF)
1854 return ERROR_FAIL;
1855
1856 for (wp = target->watchpoints; wp; wp = wp->next)
1857 if (exception_address >= wp->address && exception_address < (wp->address + wp->length)) {
1858 *hit_watchpoint = wp;
1859 return ERROR_OK;
1860 }
1861
1862 return ERROR_FAIL;
1863 }
1864
1865 /*
1866 * Cortex-A8 Reset functions
1867 */
1868
1869 static int aarch64_enable_reset_catch(struct target *target, bool enable)
1870 {
1871 struct armv8_common *armv8 = target_to_armv8(target);
1872 uint32_t edecr;
1873 int retval;
1874
1875 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1876 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1877 LOG_DEBUG("EDECR = 0x%08" PRIx32 ", enable=%d", edecr, enable);
1878 if (retval != ERROR_OK)
1879 return retval;
1880
1881 if (enable)
1882 edecr |= ECR_RCE;
1883 else
1884 edecr &= ~ECR_RCE;
1885
1886 return mem_ap_write_atomic_u32(armv8->debug_ap,
1887 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1888 }
1889
1890 static int aarch64_clear_reset_catch(struct target *target)
1891 {
1892 struct armv8_common *armv8 = target_to_armv8(target);
1893 uint32_t edesr;
1894 int retval;
1895 bool was_triggered;
1896
1897 /* check if Reset Catch debug event triggered as expected */
1898 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1899 armv8->debug_base + CPUV8_DBG_EDESR, &edesr);
1900 if (retval != ERROR_OK)
1901 return retval;
1902
1903 was_triggered = !!(edesr & ESR_RC);
1904 LOG_DEBUG("Reset Catch debug event %s",
1905 was_triggered ? "triggered" : "NOT triggered!");
1906
1907 if (was_triggered) {
1908 /* clear pending Reset Catch debug event */
1909 edesr &= ~ESR_RC;
1910 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1911 armv8->debug_base + CPUV8_DBG_EDESR, edesr);
1912 if (retval != ERROR_OK)
1913 return retval;
1914 }
1915
1916 return ERROR_OK;
1917 }
1918
/*
 * Assert reset on the target. Prefers a user-supplied RESET_ASSERT event
 * handler; otherwise drives SRST. When reset-halt is requested and the
 * target is examined, a Reset Catch debug event is armed first so the
 * core halts right out of reset.
 */
static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	enum reset_types reset_config = jtag_get_reset_config();
	int retval;

	LOG_DEBUG(" ");

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (reset_config & RESET_HAS_SRST) {
		bool srst_asserted = false;

		if (target->reset_halt && !(reset_config & RESET_SRST_PULLS_TRST)) {
			if (target_was_examined(target)) {

				if (reset_config & RESET_SRST_NO_GATING) {
					/*
					 * SRST needs to be asserted *before* Reset Catch
					 * debug event can be set up.
					 */
					adapter_assert_reset();
					srst_asserted = true;
				}

				/* make sure to clear all sticky errors
				 * (best effort: the result is deliberately ignored) */
				mem_ap_write_atomic_u32(armv8->debug_ap,
						armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);

				/* set up Reset Catch debug event to halt the CPU after reset */
				retval = aarch64_enable_reset_catch(target, true);
				if (retval != ERROR_OK)
					LOG_WARNING("%s: Error enabling Reset Catch debug event; the CPU will not halt immediately after reset!",
							target_name(target));
			} else {
				LOG_WARNING("%s: Target not examined, will not halt immediately after reset!",
						target_name(target));
			}
		}

		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		if (!srst_asserted)
			adapter_assert_reset();
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target)) {
		register_cache_invalidate(armv8->arm.core_cache);
		register_cache_invalidate(armv8->arm.core_cache->next);
	}

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1980
/*
 * Deassert reset and bring the target back under debug control.
 * Bails out early for an unexamined target (debug resources are not
 * valid yet, touching them could crash). When reset-halt was requested,
 * the Reset Catch event armed in assert_reset is acknowledged and
 * disarmed; if the core still ran past reset, it is halted manually.
 */
static int aarch64_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	adapter_deassert_reset();

	/* an unexamined target has no usable debug AP — nothing more to do */
	if (!target_was_examined(target))
		return ERROR_OK;

	retval = aarch64_init_debug_access(target);
	if (retval != ERROR_OK)
		return retval;

	/* poll once so target->state reflects the post-reset core state */
	retval = aarch64_poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		/* clear pending Reset Catch debug event */
		retval = aarch64_clear_reset_catch(target);
		if (retval != ERROR_OK)
			LOG_WARNING("%s: Clearing Reset Catch debug event failed",
					target_name(target));

		/* disable Reset Catch debug event */
		retval = aarch64_enable_reset_catch(target, false);
		if (retval != ERROR_OK)
			LOG_WARNING("%s: Disabling Reset Catch debug event failed",
					target_name(target));

		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
					target_name(target));
			if (target_was_examined(target)) {
				retval = aarch64_halt_one(target, HALT_LAZY);
				if (retval != ERROR_OK)
					return retval;
			} else {
				target->state = TARGET_UNKNOWN;
			}
		}
	}

	return ERROR_OK;
}
2029
/*
 * Slow-path CPU memory write via the DCC: for each element, push the
 * data through DTRRX into R1/X1 on the core and execute a store through
 * the address register. Handles 1-, 2- and 4-byte element sizes.
 * Assumes the address for the store has already been loaded into the
 * core's address register by the caller (aarch64_write_cpu_memory).
 */
static int aarch64_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

	/* R1/X1 is clobbered as the data register; mark it for restore */
	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode if necessary */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t data, opcode;

		/* write the data to store into DTRRX */
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* move DTRRX into R1/X1 on the core (MRS in AArch64, MRC in AArch32) */
		if (arm->core_state == ARM_STATE_AARCH64)
			retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		/* store R1/X1 to memory with post-increment, width per 'size' */
		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2088
/*
 * Fast-path CPU memory write: switch the DCC to memory-access mode
 * (DSCR.MA), stream 32-bit words into DTRRX in one non-incrementing
 * burst — the core auto-stores each word — then switch back to normal
 * mode. Only valid for word-sized, word-aligned transfers.
 * Step numbers refer to the algorithm in DDI0487A.g, chapter J9.1.
 */
static int aarch64_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;

	/* R1/X1 is clobbered by the memory-mode transfer; mark it for restore */
	armv8_reg_current(arm, 1)->dirty = true;

	/* Step 1.d - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;


	/* Step 2.a - Do the write */
	retval = mem_ap_write_buf_noincr(armv8->debug_ap,
			buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.a - Switch DTR mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2121
/*
 * Write target memory through the core's debug interface (APB-AP).
 *
 * Implements the algorithm of ARM DDI0487A.g, chapter J9.1: the start
 * address is loaded into X0/R0 via the DCC, then the payload is streamed
 * either in memory-access mode (fast path, requires size == 4 and a
 * 4-byte-aligned address) or one element at a time (slow path).
 * Sticky abort flags in EDSCR are checked once after the transfer;
 * on abort the exception is handed to armv8_dpm_handle_exception().
 *
 * X0 (and X1 in the helpers) are marked dirty and restored automatically
 * when the core leaves debug state.
 */
static int aarch64_write_cpu_memory(struct target *target,
	uint64_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* write memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to X0 using instruction mrs x0, DBGDTR_EL0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRX */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* Fast path only works on whole, aligned 32-bit words */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);

	if (retval != ERROR_OK) {
		/* Unset DTR mode (best effort - errors here are ignored so the
		 * original failure is reported below) */
		mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2208
2209 static int aarch64_read_cpu_memory_slow(struct target *target,
2210 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2211 {
2212 struct armv8_common *armv8 = target_to_armv8(target);
2213 struct arm_dpm *dpm = &armv8->dpm;
2214 struct arm *arm = &armv8->arm;
2215 int retval;
2216
2217 armv8_reg_current(arm, 1)->dirty = true;
2218
2219 /* change DCC to normal mode (if necessary) */
2220 if (*dscr & DSCR_MA) {
2221 *dscr &= DSCR_MA;
2222 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2223 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2224 if (retval != ERROR_OK)
2225 return retval;
2226 }
2227
2228 while (count) {
2229 uint32_t opcode, data;
2230
2231 if (size == 1)
2232 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
2233 else if (size == 2)
2234 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
2235 else
2236 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
2237 retval = dpm->instr_execute(dpm, opcode);
2238 if (retval != ERROR_OK)
2239 return retval;
2240
2241 if (arm->core_state == ARM_STATE_AARCH64)
2242 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
2243 else
2244 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
2245 if (retval != ERROR_OK)
2246 return retval;
2247
2248 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2249 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
2250 if (retval != ERROR_OK)
2251 return retval;
2252
2253 if (size == 1)
2254 *buffer = (uint8_t)data;
2255 else if (size == 2)
2256 target_buffer_set_u16(target, buffer, (uint16_t)data);
2257 else
2258 target_buffer_set_u32(target, buffer, data);
2259
2260 /* Advance */
2261 buffer += size;
2262 --count;
2263 }
2264
2265 return ERROR_OK;
2266 }
2267
/*
 * Fast bulk-read path for aarch64_read_cpu_memory().
 *
 * Uses DCC memory-access mode (EDSCR.MA): once armed, every host read of
 * DBGDTRTX makes the core load the next word from [X0] and auto-increment
 * X0.  Because each DTRTX read pre-fetches the following word, only
 * count-1 words are read in memory mode; the final word is collected
 * after switching back to normal mode so no out-of-bounds load is issued.
 *
 * Step numbers refer to ARM DDI0487, chapter J9.1.  X0 must already hold
 * the (4-byte aligned) start address; *dscr caches EDSCR.
 */
static int aarch64_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t value;

	/* Mark X1 as dirty */
	armv8_reg_current(arm, 1)->dirty = true;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
	} else {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
	}

	if (retval != ERROR_OK)
		return retval;

	/* Step 1.e - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 1.f - read DBGDTRTX and discard the value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	/* the last word is fetched outside memory mode, see below */
	count--;
	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	if (count) {
		/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
		 * increments X0 by 4. */
		retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
				armv8->debug_base + CPUV8_DBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Step 3.a - set DTR access mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	target_buffer_set_u32(target, buffer + count * 4, value);
	return retval;
}
2336
/*
 * Read target memory through the core's debug interface (APB-AP).
 *
 * Counterpart of aarch64_write_cpu_memory(); same DDI0487A.g J9.1
 * algorithm: load the start address into X0/R0 via the DCC, then pull the
 * data via memory-access mode (fast path, size == 4 and aligned) or one
 * element at a time (slow path).  Sticky abort flags in EDSCR are checked
 * once after the transfer.
 */
static int aarch64_read_cpu_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
			address, size, count);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to X0 using instruction mrs x0, DBGDTR_EL0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* Fast path only works on whole, aligned 32-bit words */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);

	/* make sure memory-access mode is left disabled, even on failure */
	if (dscr & DSCR_MA) {
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval != ERROR_OK)
		return retval;

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2427
2428 static int aarch64_read_phys_memory(struct target *target,
2429 target_addr_t address, uint32_t size,
2430 uint32_t count, uint8_t *buffer)
2431 {
2432 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2433
2434 if (count && buffer) {
2435 /* read memory through APB-AP */
2436 retval = aarch64_mmu_modify(target, 0);
2437 if (retval != ERROR_OK)
2438 return retval;
2439 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2440 }
2441 return retval;
2442 }
2443
2444 static int aarch64_read_memory(struct target *target, target_addr_t address,
2445 uint32_t size, uint32_t count, uint8_t *buffer)
2446 {
2447 int mmu_enabled = 0;
2448 int retval;
2449
2450 /* determine if MMU was enabled on target stop */
2451 retval = aarch64_mmu(target, &mmu_enabled);
2452 if (retval != ERROR_OK)
2453 return retval;
2454
2455 if (mmu_enabled) {
2456 /* enable MMU as we could have disabled it for phys access */
2457 retval = aarch64_mmu_modify(target, 1);
2458 if (retval != ERROR_OK)
2459 return retval;
2460 }
2461 return aarch64_read_cpu_memory(target, address, size, count, buffer);
2462 }
2463
2464 static int aarch64_write_phys_memory(struct target *target,
2465 target_addr_t address, uint32_t size,
2466 uint32_t count, const uint8_t *buffer)
2467 {
2468 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2469
2470 if (count && buffer) {
2471 /* write memory through APB-AP */
2472 retval = aarch64_mmu_modify(target, 0);
2473 if (retval != ERROR_OK)
2474 return retval;
2475 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2476 }
2477
2478 return retval;
2479 }
2480
2481 static int aarch64_write_memory(struct target *target, target_addr_t address,
2482 uint32_t size, uint32_t count, const uint8_t *buffer)
2483 {
2484 int mmu_enabled = 0;
2485 int retval;
2486
2487 /* determine if MMU was enabled on target stop */
2488 retval = aarch64_mmu(target, &mmu_enabled);
2489 if (retval != ERROR_OK)
2490 return retval;
2491
2492 if (mmu_enabled) {
2493 /* enable MMU as we could have disabled it for phys access */
2494 retval = aarch64_mmu_modify(target, 1);
2495 if (retval != ERROR_OK)
2496 return retval;
2497 }
2498 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2499 }
2500
2501 static int aarch64_handle_target_request(void *priv)
2502 {
2503 struct target *target = priv;
2504 struct armv8_common *armv8 = target_to_armv8(target);
2505 int retval;
2506
2507 if (!target_was_examined(target))
2508 return ERROR_OK;
2509 if (!target->dbg_msg_enabled)
2510 return ERROR_OK;
2511
2512 if (target->state == TARGET_RUNNING) {
2513 uint32_t request;
2514 uint32_t dscr;
2515 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2516 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2517
2518 /* check if we have data */
2519 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2520 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2521 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2522 if (retval == ERROR_OK) {
2523 target_request(target, request);
2524 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2525 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2526 }
2527 }
2528 }
2529
2530 return ERROR_OK;
2531 }
2532
2533 static int aarch64_examine_first(struct target *target)
2534 {
2535 struct aarch64_common *aarch64 = target_to_aarch64(target);
2536 struct armv8_common *armv8 = &aarch64->armv8_common;
2537 struct adiv5_dap *swjdp = armv8->arm.dap;
2538 struct aarch64_private_config *pc = target->private_config;
2539 int i;
2540 int retval = ERROR_OK;
2541 uint64_t debug, ttypr;
2542 uint32_t cpuid;
2543 uint32_t tmp0, tmp1, tmp2, tmp3;
2544 debug = ttypr = cpuid = 0;
2545
2546 if (!pc)
2547 return ERROR_FAIL;
2548
2549 if (!armv8->debug_ap) {
2550 if (pc->adiv5_config.ap_num == DP_APSEL_INVALID) {
2551 /* Search for the APB-AB */
2552 retval = dap_find_get_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2553 if (retval != ERROR_OK) {
2554 LOG_ERROR("Could not find APB-AP for debug access");
2555 return retval;
2556 }
2557 } else {
2558 armv8->debug_ap = dap_get_ap(swjdp, pc->adiv5_config.ap_num);
2559 if (!armv8->debug_ap) {
2560 LOG_ERROR("Cannot get AP");
2561 return ERROR_FAIL;
2562 }
2563 }
2564 }
2565
2566 retval = mem_ap_init(armv8->debug_ap);
2567 if (retval != ERROR_OK) {
2568 LOG_ERROR("Could not initialize the APB-AP");
2569 return retval;
2570 }
2571
2572 armv8->debug_ap->memaccess_tck = 10;
2573
2574 if (!target->dbgbase_set) {
2575 /* Lookup Processor DAP */
2576 retval = dap_lookup_cs_component(armv8->debug_ap, ARM_CS_C9_DEVTYPE_CORE_DEBUG,
2577 &armv8->debug_base, target->coreid);
2578 if (retval != ERROR_OK)
2579 return retval;
2580 LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT,
2581 target->coreid, armv8->debug_base);
2582 } else
2583 armv8->debug_base = target->dbgbase;
2584
2585 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2586 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2587 if (retval != ERROR_OK) {
2588 LOG_DEBUG("Examine %s failed", "oslock");
2589 return retval;
2590 }
2591
2592 retval = mem_ap_read_u32(armv8->debug_ap,
2593 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2594 if (retval != ERROR_OK) {
2595 LOG_DEBUG("Examine %s failed", "CPUID");
2596 return retval;
2597 }
2598
2599 retval = mem_ap_read_u32(armv8->debug_ap,
2600 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2601 retval += mem_ap_read_u32(armv8->debug_ap,
2602 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2603 if (retval != ERROR_OK) {
2604 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2605 return retval;
2606 }
2607 retval = mem_ap_read_u32(armv8->debug_ap,
2608 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2609 retval += mem_ap_read_u32(armv8->debug_ap,
2610 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2611 if (retval != ERROR_OK) {
2612 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2613 return retval;
2614 }
2615
2616 retval = dap_run(armv8->debug_ap->dap);
2617 if (retval != ERROR_OK) {
2618 LOG_ERROR("%s: examination failed\n", target_name(target));
2619 return retval;
2620 }
2621
2622 ttypr |= tmp1;
2623 ttypr = (ttypr << 32) | tmp0;
2624 debug |= tmp3;
2625 debug = (debug << 32) | tmp2;
2626
2627 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2628 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2629 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2630
2631 if (!pc->cti) {
2632 LOG_TARGET_ERROR(target, "CTI not specified");
2633 return ERROR_FAIL;
2634 }
2635
2636 armv8->cti = pc->cti;
2637
2638 retval = aarch64_dpm_setup(aarch64, debug);
2639 if (retval != ERROR_OK)
2640 return retval;
2641
2642 /* Setup Breakpoint Register Pairs */
2643 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2644 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2645 aarch64->brp_num_available = aarch64->brp_num;
2646 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2647 for (i = 0; i < aarch64->brp_num; i++) {
2648 aarch64->brp_list[i].used = 0;
2649 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2650 aarch64->brp_list[i].type = BRP_NORMAL;
2651 else
2652 aarch64->brp_list[i].type = BRP_CONTEXT;
2653 aarch64->brp_list[i].value = 0;
2654 aarch64->brp_list[i].control = 0;
2655 aarch64->brp_list[i].brpn = i;
2656 }
2657
2658 /* Setup Watchpoint Register Pairs */
2659 aarch64->wp_num = (uint32_t)((debug >> 20) & 0x0F) + 1;
2660 aarch64->wp_num_available = aarch64->wp_num;
2661 aarch64->wp_list = calloc(aarch64->wp_num, sizeof(struct aarch64_brp));
2662 for (i = 0; i < aarch64->wp_num; i++) {
2663 aarch64->wp_list[i].used = 0;
2664 aarch64->wp_list[i].type = BRP_NORMAL;
2665 aarch64->wp_list[i].value = 0;
2666 aarch64->wp_list[i].control = 0;
2667 aarch64->wp_list[i].brpn = i;
2668 }
2669
2670 LOG_DEBUG("Configured %i hw breakpoints, %i watchpoints",
2671 aarch64->brp_num, aarch64->wp_num);
2672
2673 target->state = TARGET_UNKNOWN;
2674 target->debug_reason = DBG_REASON_NOTHALTED;
2675 aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2676 target_set_examined(target);
2677 return ERROR_OK;
2678 }
2679
2680 static int aarch64_examine(struct target *target)
2681 {
2682 int retval = ERROR_OK;
2683
2684 /* don't re-probe hardware after each reset */
2685 if (!target_was_examined(target))
2686 retval = aarch64_examine_first(target);
2687
2688 /* Configure core debug access */
2689 if (retval == ERROR_OK)
2690 retval = aarch64_init_debug_access(target);
2691
2692 return retval;
2693 }
2694
/*
 * AArch64 target creation and initialization
 */
2698
/* Target 'init_target' hook: most setup already happened in
 * aarch64_examine_first(); only semihosting support is hooked up here. */
static int aarch64_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	arm_semihosting_init(target);
	return ERROR_OK;
}
2706
/*
 * Populate the aarch64/armv8 arch info embedded in 'target' and install
 * the architecture-specific hooks; called once from target_create.
 * Also starts the periodic DCC poll used for target debug messages.
 */
static int aarch64_init_arch_info(struct target *target,
	struct aarch64_common *aarch64, struct adiv5_dap *dap)
{
	struct armv8_common *armv8 = &aarch64->armv8_common;

	/* Setup struct aarch64_common */
	aarch64->common_magic = AARCH64_COMMON_MAGIC;
	armv8->arm.dap = dap;

	/* register arch-specific functions */
	armv8->examine_debug_reason = NULL;
	armv8->post_debug_entry = aarch64_post_debug_entry;
	armv8->pre_restore_context = NULL;
	armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;

	armv8_init_arch_info(target, armv8);
	/* poll the DCC for target requests while the core is running */
	target_register_timer_callback(aarch64_handle_target_request, 1,
		TARGET_TIMER_TYPE_PERIODIC, target);

	return ERROR_OK;
}
2728
2729 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2730 {
2731 struct aarch64_private_config *pc = target->private_config;
2732 struct aarch64_common *aarch64;
2733
2734 if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2735 return ERROR_FAIL;
2736
2737 aarch64 = calloc(1, sizeof(struct aarch64_common));
2738 if (!aarch64) {
2739 LOG_ERROR("Out of memory");
2740 return ERROR_FAIL;
2741 }
2742
2743 return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2744 }
2745
2746 static void aarch64_deinit_target(struct target *target)
2747 {
2748 struct aarch64_common *aarch64 = target_to_aarch64(target);
2749 struct armv8_common *armv8 = &aarch64->armv8_common;
2750 struct arm_dpm *dpm = &armv8->dpm;
2751
2752 if (armv8->debug_ap)
2753 dap_put_ap(armv8->debug_ap);
2754
2755 armv8_free_reg_cache(target);
2756 free(aarch64->brp_list);
2757 free(dpm->dbp);
2758 free(dpm->dwp);
2759 free(target->private_config);
2760 free(aarch64);
2761 }
2762
2763 static int aarch64_mmu(struct target *target, int *enabled)
2764 {
2765 if (target->state != TARGET_HALTED) {
2766 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2767 return ERROR_TARGET_INVALID;
2768 }
2769
2770 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2771 return ERROR_OK;
2772 }
2773
/* Target 'virt2phys' hook: delegate to the common armv8 translation-table
 * walker.  NOTE(review): the final '1' flag's meaning is defined by
 * armv8_mmu_translate_va_pa() — confirm against armv8.c before relying
 * on it here. */
static int aarch64_virt2phys(struct target *target, target_addr_t virt,
	target_addr_t *phys)
{
	return armv8_mmu_translate_va_pa(target, virt, phys, 1);
}
2779
/*
 * private target configuration items
 */
enum aarch64_cfg_param {
	CFG_CTI,	/* '-cti <name>': cross-trigger interface instance */
};

/* option-name <-> enum mapping consumed by aarch64_jim_configure() */
static const struct jim_nvp nvp_config_opts[] = {
	{ .name = "-cti", .value = CFG_CTI },
	{ .name = NULL, .value = -1 }
};
2791
/*
 * Parse (or query, for 'cget') the aarch64-specific target options after
 * letting the common ADIv5 parser consume the DAP/AP options.  Currently
 * the only private option is '-cti'.
 *
 * Returns JIM_OK on success, JIM_ERR on error, JIM_CONTINUE when the
 * topmost option is not ours and the caller should keep parsing.
 */
static int aarch64_jim_configure(struct target *target, struct jim_getopt_info *goi)
{
	struct aarch64_private_config *pc;
	struct jim_nvp *n;
	int e;

	/* lazily allocate the private config on first use */
	pc = (struct aarch64_private_config *)target->private_config;
	if (!pc) {
		pc = calloc(1, sizeof(struct aarch64_private_config));
		pc->adiv5_config.ap_num = DP_APSEL_INVALID;
		target->private_config = pc;
	}

	/*
	 * Call adiv5_jim_configure() to parse the common DAP options
	 * It will return JIM_CONTINUE if it didn't find any known
	 * options, JIM_OK if it correctly parsed the topmost option
	 * and JIM_ERR if an error occurred during parameter evaluation.
	 * For JIM_CONTINUE, we check our own params.
	 *
	 * adiv5_jim_configure() assumes 'private_config' to point to
	 * 'struct adiv5_private_config'. Override 'private_config'!
	 */
	target->private_config = &pc->adiv5_config;
	e = adiv5_jim_configure(target, goi);
	target->private_config = pc;
	if (e != JIM_CONTINUE)
		return e;

	/* parse config or cget options ... */
	if (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);

		/* check first if topmost item is for us */
		e = jim_nvp_name2value_obj(goi->interp, nvp_config_opts,
				goi->argv[0], &n);
		if (e != JIM_OK)
			return JIM_CONTINUE;

		/* consume the option name itself */
		e = jim_getopt_obj(goi, NULL);
		if (e != JIM_OK)
			return e;

		switch (n->value) {
		case CFG_CTI: {
			if (goi->isconfigure) {
				/* 'configure -cti <name>': resolve and store the CTI */
				Jim_Obj *o_cti;
				struct arm_cti *cti;
				e = jim_getopt_obj(goi, &o_cti);
				if (e != JIM_OK)
					return e;
				cti = cti_instance_by_jim_obj(goi->interp, o_cti);
				if (!cti) {
					Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
					return JIM_ERR;
				}
				pc->cti = cti;
			} else {
				/* 'cget -cti': report the configured CTI name */
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}

				if (!pc || !pc->cti) {
					Jim_SetResultString(goi->interp, "CTI not configured", -1);
					return JIM_ERR;
				}
				Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
			}
			break;
		}

		default:
			return JIM_CONTINUE;
		}
	}

	return JIM_OK;
}
2873
/* 'aarch64 cache_info': print the cache information collected by the
 * common armv8 layer for the current target. */
COMMAND_HANDLER(aarch64_handle_cache_info_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct armv8_common *armv8 = target_to_armv8(target);

	return armv8_handle_cache_info_command(CMD,
			&armv8->armv8_mmu.armv8_cache);
}
2882
/* 'aarch64 dbginit': (re-)initialize core debug access; requires the
 * target to have been examined so the debug AP/base are known. */
COMMAND_HANDLER(aarch64_handle_dbginit_command)
{
	struct target *target = get_current_target(CMD_CTX);
	if (!target_was_examined(target)) {
		LOG_ERROR("target not examined yet");
		return ERROR_FAIL;
	}

	return aarch64_init_debug_access(target);
}
2893
2894 COMMAND_HANDLER(aarch64_handle_disassemble_command)
2895 {
2896 struct target *target = get_current_target(CMD_CTX);
2897
2898 if (!target) {
2899 LOG_ERROR("No target selected");
2900 return ERROR_FAIL;
2901 }
2902
2903 struct aarch64_common *aarch64 = target_to_aarch64(target);
2904
2905 if (aarch64->common_magic != AARCH64_COMMON_MAGIC) {
2906 command_print(CMD, "current target isn't an AArch64");
2907 return ERROR_FAIL;
2908 }
2909
2910 int count = 1;
2911 target_addr_t address;
2912
2913 switch (CMD_ARGC) {
2914 case 2:
2915 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
2916 /* FALL THROUGH */
2917 case 1:
2918 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
2919 break;
2920 default:
2921 return ERROR_COMMAND_SYNTAX_ERROR;
2922 }
2923
2924 return a64_disassemble(CMD, target, address, count);
2925 }
2926
/* 'aarch64 maskisr ['on'|'off']': set and/or report whether interrupts
 * are masked while single-stepping. */
COMMAND_HANDLER(aarch64_mask_interrupts_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	static const struct jim_nvp nvp_maskisr_modes[] = {
		{ .name = "off", .value = AARCH64_ISRMASK_OFF },
		{ .name = "on", .value = AARCH64_ISRMASK_ON },
		{ .name = NULL, .value = -1 },
	};
	const struct jim_nvp *n;

	/* with an argument: update the mode */
	if (CMD_ARGC > 0) {
		n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
		if (!n->name) {
			LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}

		aarch64->isrmasking_mode = n->value;
	}

	/* always report the (possibly updated) current mode */
	n = jim_nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
	command_print(CMD, "aarch64 interrupt mask %s", n->name);

	return ERROR_OK;
}
2954
2955 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
2956 {
2957 struct command *c = jim_to_command(interp);
2958 struct command_context *context;
2959 struct target *target;
2960 struct arm *arm;
2961 int retval;
2962 bool is_mcr = false;
2963 int arg_cnt = 0;
2964
2965 if (!strcmp(c->name, "mcr")) {
2966 is_mcr = true;
2967 arg_cnt = 7;
2968 } else {
2969 arg_cnt = 6;
2970 }
2971
2972 context = current_command_context(interp);
2973 assert(context);
2974
2975 target = get_current_target(context);
2976 if (!target) {
2977 LOG_ERROR("%s: no current target", __func__);
2978 return JIM_ERR;
2979 }
2980 if (!target_was_examined(target)) {
2981 LOG_ERROR("%s: not yet examined", target_name(target));
2982 return JIM_ERR;
2983 }
2984
2985 arm = target_to_arm(target);
2986 if (!is_arm(arm)) {
2987 LOG_ERROR("%s: not an ARM", target_name(target));
2988 return JIM_ERR;
2989 }
2990
2991 if (target->state != TARGET_HALTED)
2992 return ERROR_TARGET_NOT_HALTED;
2993
2994 if (arm->core_state == ARM_STATE_AARCH64) {
2995 LOG_ERROR("%s: not 32-bit arm target", target_name(target));
2996 return JIM_ERR;
2997 }
2998
2999 if (argc != arg_cnt) {
3000 LOG_ERROR("%s: wrong number of arguments", __func__);
3001 return JIM_ERR;
3002 }
3003
3004 int cpnum;
3005 uint32_t op1;
3006 uint32_t op2;
3007 uint32_t crn;
3008 uint32_t crm;
3009 uint32_t value;
3010 long l;
3011
3012 /* NOTE: parameter sequence matches ARM instruction set usage:
3013 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
3014 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
3015 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
3016 */
3017 retval = Jim_GetLong(interp, argv[1], &l);
3018 if (retval != JIM_OK)
3019 return retval;
3020 if (l & ~0xf) {
3021 LOG_ERROR("%s: %s %d out of range", __func__,
3022 "coprocessor", (int) l);
3023 return JIM_ERR;
3024 }
3025 cpnum = l;
3026
3027 retval = Jim_GetLong(interp, argv[2], &l);
3028 if (retval != JIM_OK)
3029 return retval;
3030 if (l & ~0x7) {
3031 LOG_ERROR("%s: %s %d out of range", __func__,
3032 "op1", (int) l);
3033 return JIM_ERR;
3034 }
3035 op1 = l;
3036
3037 retval = Jim_GetLong(interp, argv[3], &l);
3038 if (retval != JIM_OK)
3039 return retval;
3040 if (l & ~0xf) {
3041 LOG_ERROR("%s: %s %d out of range", __func__,
3042 "CRn", (int) l);
3043 return JIM_ERR;
3044 }
3045 crn = l;
3046
3047 retval = Jim_GetLong(interp, argv[4], &l);
3048 if (retval != JIM_OK)
3049 return retval;
3050 if (l & ~0xf) {
3051 LOG_ERROR("%s: %s %d out of range", __func__,
3052 "CRm", (int) l);
3053 return JIM_ERR;
3054 }
3055 crm = l;
3056
3057 retval = Jim_GetLong(interp, argv[5], &l);
3058 if (retval != JIM_OK)
3059 return retval;
3060 if (l & ~0x7) {
3061 LOG_ERROR("%s: %s %d out of range", __func__,
3062 "op2", (int) l);
3063 return JIM_ERR;
3064 }
3065 op2 = l;
3066
3067 value = 0;
3068
3069 if (is_mcr == true) {
3070 retval = Jim_GetLong(interp, argv[6], &l);
3071 if (retval != JIM_OK)
3072 return retval;
3073 value = l;
3074
3075 /* NOTE: parameters reordered! */
3076 /* ARMV4_5_MCR(cpnum, op1, 0, crn, crm, op2) */
3077 retval = arm->mcr(target, cpnum, op1, op2, crn, crm, value);
3078 if (retval != ERROR_OK)
3079 return JIM_ERR;
3080 } else {
3081 /* NOTE: parameters reordered! */
3082 /* ARMV4_5_MRC(cpnum, op1, 0, crn, crm, op2) */
3083 retval = arm->mrc(target, cpnum, op1, op2, crn, crm, &value);
3084 if (retval != ERROR_OK)
3085 return JIM_ERR;
3086
3087 Jim_SetResult(interp, Jim_NewIntObj(interp, value));
3088 }
3089
3090 return JIM_OK;
3091 }
3092
/* Commands registered under the 'aarch64' command group (see
 * aarch64_command_handlers below). */
static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "disassemble",
		.handler = aarch64_handle_disassemble_command,
		.mode = COMMAND_EXEC,
		.help = "Disassemble instructions",
		.usage = "address [count]",
	},
	{
		.name = "maskisr",
		.handler = aarch64_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask aarch64 interrupts during single-step",
		.usage = "['on'|'off']",
	},
	{
		.name = "mcr",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "write coprocessor register",
		.usage = "cpnum op1 CRn CRm op2 value",
	},
	{
		.name = "mrc",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "read coprocessor register",
		.usage = "cpnum op1 CRn CRm op2",
	},
	{
		/* pull in the generic SMP sub-commands */
		.chain = smp_command_handlers,
	},


	COMMAND_REGISTRATION_DONE
};
3143
/* Top-level command registration: 'arm' (semihosting), the common armv8
 * commands, and the 'aarch64' group defined above. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.name = "arm",
		.mode = COMMAND_ANY,
		.help = "ARM Command Group",
		.usage = "",
		.chain = semihosting_common_handlers
	},
	{
		.chain = armv8_command_handlers,
	},
	{
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "Aarch64 command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3164
/* Hooks implementing the generic OpenOCD target API for ARMv8-A
 * AArch64 cores. */
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = armv8_get_gdb_arch,
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = aarch64_add_watchpoint,
	.remove_watchpoint = aarch64_remove_watchpoint,
	.hit_watchpoint = aarch64_hit_watchpoint,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.target_jim_configure = aarch64_jim_configure,
	.init_target = aarch64_init_target,
	.deinit_target = aarch64_deinit_target,
	.examine = aarch64_examine,

	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)