target: use 'ULL' suffix for long constants
[openocd.git] / src / target / aarch64.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2015 by David Ung *
5 * *
6 ***************************************************************************/
7
8 #ifdef HAVE_CONFIG_H
9 #include "config.h"
10 #endif
11
12 #include "breakpoints.h"
13 #include "aarch64.h"
14 #include "a64_disassembler.h"
15 #include "register.h"
16 #include "target_request.h"
17 #include "target_type.h"
18 #include "armv8_opcodes.h"
19 #include "armv8_cache.h"
20 #include "arm_coresight.h"
21 #include "arm_semihosting.h"
22 #include "jtag/interface.h"
23 #include "smp.h"
24 #include <helper/time_support.h>
25
/* How aarch64_do_restart_one() behaves after triggering the restart:
 * LAZY fires the CTI pulse and returns, SYNC also waits for PRSR.SDR. */
enum restart_mode {
	RESTART_LAZY,
	RESTART_SYNC,
};
30
/* How aarch64_halt_one() behaves after raising the halt request:
 * LAZY fires the CTI pulse and returns, SYNC also waits for PRSR.HALT. */
enum halt_mode {
	HALT_LAZY,
	HALT_SYNC,
};
35
/* Per-target private configuration: the generic ADIv5 DAP/AP settings
 * plus the cross-trigger interface (CTI) used for halt/restart control. */
struct aarch64_private_config {
	struct adiv5_private_config adiv5_config;
	struct arm_cti *cti;
};
40
41 static int aarch64_poll(struct target *target);
42 static int aarch64_debug_entry(struct target *target);
43 static int aarch64_restore_context(struct target *target, bool bpwp);
44 static int aarch64_set_breakpoint(struct target *target,
45 struct breakpoint *breakpoint, uint8_t matchmode);
46 static int aarch64_set_context_breakpoint(struct target *target,
47 struct breakpoint *breakpoint, uint8_t matchmode);
48 static int aarch64_set_hybrid_breakpoint(struct target *target,
49 struct breakpoint *breakpoint);
50 static int aarch64_unset_breakpoint(struct target *target,
51 struct breakpoint *breakpoint);
52 static int aarch64_mmu(struct target *target, int *enabled);
53 static int aarch64_virt2phys(struct target *target,
54 target_addr_t virt, target_addr_t *phys);
55 static int aarch64_read_cpu_memory(struct target *target,
56 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
57
58 static int aarch64_restore_system_control_reg(struct target *target)
59 {
60 enum arm_mode target_mode = ARM_MODE_ANY;
61 int retval = ERROR_OK;
62 uint32_t instr;
63
64 struct aarch64_common *aarch64 = target_to_aarch64(target);
65 struct armv8_common *armv8 = target_to_armv8(target);
66
67 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
68 aarch64->system_control_reg_curr = aarch64->system_control_reg;
69 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
70
71 switch (armv8->arm.core_mode) {
72 case ARMV8_64_EL0T:
73 target_mode = ARMV8_64_EL1H;
74 /* fall through */
75 case ARMV8_64_EL1T:
76 case ARMV8_64_EL1H:
77 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
78 break;
79 case ARMV8_64_EL2T:
80 case ARMV8_64_EL2H:
81 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
82 break;
83 case ARMV8_64_EL3H:
84 case ARMV8_64_EL3T:
85 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
86 break;
87
88 case ARM_MODE_SVC:
89 case ARM_MODE_ABT:
90 case ARM_MODE_FIQ:
91 case ARM_MODE_IRQ:
92 case ARM_MODE_HYP:
93 case ARM_MODE_UND:
94 case ARM_MODE_SYS:
95 instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
96 break;
97
98 default:
99 LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
100 armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
101 return ERROR_FAIL;
102 }
103
104 if (target_mode != ARM_MODE_ANY)
105 armv8_dpm_modeswitch(&armv8->dpm, target_mode);
106
107 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
108 if (retval != ERROR_OK)
109 return retval;
110
111 if (target_mode != ARM_MODE_ANY)
112 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
113 }
114
115 return retval;
116 }
117
/* Modify system_control_reg (SCTLR) in order to enable or disable the MMU for:
 * - virt2phys address conversion
 * - reading or writing memory by physical or virtual address
 * Only the cached "current" copy plus the hardware register are touched;
 * the saved-at-halt value (system_control_reg) is left intact so the
 * original state can be restored on resume. */
static int aarch64_mmu_modify(struct target *target, int enable)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval = ERROR_OK;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr = 0;

	if (enable) {
		/* refuse to enable the MMU if it was off when the target
		 * stopped: there is no valid translation context to use */
		if (!(aarch64->system_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
			return ERROR_FAIL;
		}
		if (!(aarch64->system_control_reg_curr & 0x1U))
			aarch64->system_control_reg_curr |= 0x1U;	/* set SCTLR.M */
	} else {
		if (aarch64->system_control_reg_curr & 0x4U) {
			/* data cache is active: clear SCTLR.C and flush the
			 * D-cache so memory is coherent before MMU-off access */
			aarch64->system_control_reg_curr &= ~0x4U;
			if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
				armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
		}
		if ((aarch64->system_control_reg_curr & 0x1U)) {
			aarch64->system_control_reg_curr &= ~0x1U;	/* clear SCTLR.M */
		}
	}

	/* select the SCTLR write instruction matching the current EL */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* SCTLR_EL1 is not accessible from EL0; switch up to EL1h */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_UND:
	case ARM_MODE_SYS:
		/* AArch32 state: SCTLR is CP15 c1,0,c0,0 */
		instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_mode);
		break;
	}
	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	/* write the updated value to the hardware via r0 */
	retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
				aarch64->system_control_reg_curr);

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	return retval;
}
192
/*
 * Basic debug access, very low level, assumes state is saved
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG("%s", target_name(target));

	/* Clear the OS lock (write 0 to OSLAR) so the external debugger
	 * can access the core's debug registers */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain (reading PRSR clears it) */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = arm_cti_enable(armv8->cti, true);
	/* By default, gate all channel events to and from the CTM */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* output halt requests to PE on channel 0 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	return ERROR_OK;
}
243
244 /* Write to memory mapped registers directly with no cache or mmu handling */
245 static int aarch64_dap_write_memap_register_u32(struct target *target,
246 target_addr_t address,
247 uint32_t value)
248 {
249 int retval;
250 struct armv8_common *armv8 = target_to_armv8(target);
251
252 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
253
254 return retval;
255 }
256
257 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
258 {
259 struct arm_dpm *dpm = &a8->armv8_common.dpm;
260 int retval;
261
262 dpm->arm = &a8->armv8_common.arm;
263 dpm->didr = debug;
264
265 retval = armv8_dpm_setup(dpm);
266 if (retval == ERROR_OK)
267 retval = armv8_dpm_initialize(dpm);
268
269 return retval;
270 }
271
272 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
273 {
274 struct armv8_common *armv8 = target_to_armv8(target);
275 return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
276 }
277
278 static int aarch64_check_state_one(struct target *target,
279 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
280 {
281 struct armv8_common *armv8 = target_to_armv8(target);
282 uint32_t prsr;
283 int retval;
284
285 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
286 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
287 if (retval != ERROR_OK)
288 return retval;
289
290 if (p_prsr)
291 *p_prsr = prsr;
292
293 if (p_result)
294 *p_result = (prsr & mask) == (val & mask);
295
296 return ERROR_OK;
297 }
298
299 static int aarch64_wait_halt_one(struct target *target)
300 {
301 int retval = ERROR_OK;
302 uint32_t prsr;
303
304 int64_t then = timeval_ms();
305 for (;;) {
306 int halted;
307
308 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
309 if (retval != ERROR_OK || halted)
310 break;
311
312 if (timeval_ms() > then + 1000) {
313 retval = ERROR_TARGET_TIMEOUT;
314 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
315 break;
316 }
317 }
318 return retval;
319 }
320
/* Prepare every running PE of the SMP group for halting: open its CTI
 * gate for the halt channel and enable halting debug mode, so that a
 * single channel-0 event halts the whole group.  If exc_target is set,
 * the calling target itself is skipped.  *p_first receives the first
 * prepared target (or the caller, see below). */
static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("target %s exc %i", target_name(target), exc_target);

	foreach_smp_target(head, target->smp_targets) {
		struct target *curr = head->target;
		struct armv8_common *armv8 = target_to_armv8(curr);

		/* skip the caller if requested, plus anything not examined
		 * or not currently running */
		if (exc_target && curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_RUNNING)
			continue;

		/* HACK: mark this target as prepared for halting */
		curr->debug_reason = DBG_REASON_DBGRQ;

		/* open the gate for channel 0 to let HALT requests pass to the CTM */
		retval = arm_cti_ungate_channel(armv8->cti, 0);
		if (retval == ERROR_OK)
			retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
		if (retval != ERROR_OK)
			break;

		LOG_DEBUG("target %s prepared", target_name(curr));

		if (!first)
			first = curr;
	}

	if (p_first) {
		/* fall back to the caller itself when no other PE was prepared */
		if (exc_target && first)
			*p_first = first;
		else
			*p_first = target;
	}

	return retval;
}
365
366 static int aarch64_halt_one(struct target *target, enum halt_mode mode)
367 {
368 int retval = ERROR_OK;
369 struct armv8_common *armv8 = target_to_armv8(target);
370
371 LOG_DEBUG("%s", target_name(target));
372
373 /* allow Halting Debug Mode */
374 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
375 if (retval != ERROR_OK)
376 return retval;
377
378 /* trigger an event on channel 0, this outputs a halt request to the PE */
379 retval = arm_cti_pulse_channel(armv8->cti, 0);
380 if (retval != ERROR_OK)
381 return retval;
382
383 if (mode == HALT_SYNC) {
384 retval = aarch64_wait_halt_one(target);
385 if (retval != ERROR_OK) {
386 if (retval == ERROR_TARGET_TIMEOUT)
387 LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
388 return retval;
389 }
390 }
391
392 return ERROR_OK;
393 }
394
/* Halt all PEs of the SMP group.  The halt request to one prepared PE
 * propagates through the cross-trigger matrix to the others; we then
 * poll until every examined PE reports PRSR.HALT, retriggering halts
 * as needed (see HACK below). */
static int aarch64_halt_smp(struct target *target, bool exc_target)
{
	struct target *next = target;
	int retval;

	/* prepare halt on all PEs of the group */
	retval = aarch64_prepare_halt_smp(target, exc_target, &next);

	/* nothing else to halt: only the excluded caller remained */
	if (exc_target && next == target)
		return retval;

	/* halt the target PE; LAZY because completion is checked below
	 * for the whole group at once */
	if (retval == ERROR_OK)
		retval = aarch64_halt_one(next, HALT_LAZY);

	if (retval != ERROR_OK)
		return retval;

	/* wait for all PEs to halt */
	int64_t then = timeval_ms();
	for (;;) {
		bool all_halted = true;
		struct target_list *head;
		struct target *curr;

		foreach_smp_target(head, target->smp_targets) {
			int halted;

			curr = head->target;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
			if (retval != ERROR_OK || !halted) {
				all_halted = false;
				break;
			}
		}

		if (all_halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}

		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit halt for the second cluster.
		 */
		retval = aarch64_halt_one(curr, HALT_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
457
/* After one PE of an SMP group halted, bring the bookkeeping of the
 * whole group up to date: halt any stragglers, then poll every member
 * so its state/debug_reason get refreshed.  The target serving GDB is
 * polled last so GDB sees a consistent group. */
static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;

	/* DBG_REASON_NOTHALTED means this halt was not requested by us:
	 * stop the remaining members of the group as well */
	if (debug_reason == DBG_REASON_NOTHALTED) {
		LOG_DEBUG("Halting remaining targets in SMP group");
		aarch64_halt_smp(target, true);
	}

	/* poll all targets in the group, but skip the target that serves GDB */
	foreach_smp_target(head, target->smp_targets) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* remember the gdb_service->target */
		if (curr->gdb_service)
			gdb_target = curr->gdb_service->target;
		/* skip it */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in aarch64_poll() by temporarily clearing
		 * the smp flag around the nested poll */
		curr->smp = 0;
		aarch64_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target && gdb_target != target)
		aarch64_poll(gdb_target);

	return ERROR_OK;
}
499
500 /*
501 * Aarch64 Run control
502 */
503
/* Poll the target's PRSR and update its state.  On a new halt this runs
 * debug entry, updates the SMP group, handles semihosting and fires the
 * matching (DEBUG_)HALTED event callbacks. */
static int aarch64_poll(struct target *target)
{
	enum target_state prev_target_state;
	int retval = ERROR_OK;
	int halted;

	retval = aarch64_check_state_one(target,
				PRSR_HALT, PRSR_HALT, &halted, NULL);
	if (retval != ERROR_OK)
		return retval;

	if (halted) {
		prev_target_state = target->state;
		if (prev_target_state != TARGET_HALTED) {
			/* remember the reason before debug entry overwrites it */
			enum target_debug_reason debug_reason = target->debug_reason;

			/* We have a halting debug event */
			target->state = TARGET_HALTED;
			LOG_DEBUG("Target %s halted", target_name(target));
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp)
				update_halt_gdb(target, debug_reason);

			/* non-zero means semihosting handled (or failed) the halt */
			if (arm_semihosting(target, &retval) != 0)
				return retval;

			switch (prev_target_state) {
			case TARGET_RUNNING:
			case TARGET_UNKNOWN:
			case TARGET_RESET:
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
				break;
			case TARGET_DEBUG_RUNNING:
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
				break;
			default:
				break;
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
551
552 static int aarch64_halt(struct target *target)
553 {
554 struct armv8_common *armv8 = target_to_armv8(target);
555 armv8->last_run_control_op = ARMV8_RUNCONTROL_HALT;
556
557 if (target->smp)
558 return aarch64_halt_smp(target, false);
559
560 return aarch64_halt_one(target, HALT_SYNC);
561 }
562
/* Restore a single target for resuming: fix up the resume PC for the
 * current core state, write SCTLR back (uses r0, so it must precede the
 * register restore), then restore the full register context.
 * current = 1: continue at the saved PC and report it via *address;
 * current = 0: continue at *address. */
static int aarch64_restore_one(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	LOG_DEBUG("%s", target_name(target));

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		/* A32 instructions are word aligned */
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		/* A64 instructions are word aligned over the full 64 bits */
		resume_pc &= 0xFFFFFFFFFFFFFFFCULL;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = true;
	arm->pc->valid = true;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
617
/**
 * Prepare a single target for restart.
 *
 * Sanity-checks DSCR, acknowledges any pending CTI halt event and sets
 * up the CTI gates so a channel 1 event restarts this PE while channel
 * 0 halt events are kept away from it.  The actual restart trigger is
 * issued separately by aarch64_do_restart_one().
 */
static int aarch64_prepare_restart_one(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dscr;
	uint32_t tmp;

	LOG_DEBUG("%s", target_name(target));

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* warn (but continue) if the instruction interface is busy or an
	 * error is still latched in DSCR */
	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* acknowledge a pending CTI halt event */
	retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs. Also close gate for channel 0
	 * to isolate the PE from halt events.
	 */
	if (retval == ERROR_OK)
		retval = arm_cti_ungate_channel(armv8->cti, 1);
	if (retval == ERROR_OK)
		retval = arm_cti_gate_channel(armv8->cti, 0);

	/* make sure that DSCR.HDE is set */
	if (retval == ERROR_OK) {
		dscr |= DSCR_HDE;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval == ERROR_OK) {
		/* clear sticky bits in PRSR, SDR is now 0 (reading clears them) */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
	}

	return retval;
}
669
670 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
671 {
672 struct armv8_common *armv8 = target_to_armv8(target);
673 int retval;
674
675 LOG_DEBUG("%s", target_name(target));
676
677 /* trigger an event on channel 1, generates a restart request to the PE */
678 retval = arm_cti_pulse_channel(armv8->cti, 1);
679 if (retval != ERROR_OK)
680 return retval;
681
682 if (mode == RESTART_SYNC) {
683 int64_t then = timeval_ms();
684 for (;;) {
685 int resumed;
686 /*
687 * if PRSR.SDR is set now, the target did restart, even
688 * if it's now already halted again (e.g. due to breakpoint)
689 */
690 retval = aarch64_check_state_one(target,
691 PRSR_SDR, PRSR_SDR, &resumed, NULL);
692 if (retval != ERROR_OK || resumed)
693 break;
694
695 if (timeval_ms() > then + 1000) {
696 LOG_ERROR("%s: Timeout waiting for resume"PRIx32, target_name(target));
697 retval = ERROR_TARGET_TIMEOUT;
698 break;
699 }
700 }
701 }
702
703 if (retval != ERROR_OK)
704 return retval;
705
706 target->debug_reason = DBG_REASON_NOTHALTED;
707 target->state = TARGET_RUNNING;
708
709 return ERROR_OK;
710 }
711
712 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
713 {
714 int retval;
715
716 LOG_DEBUG("%s", target_name(target));
717
718 retval = aarch64_prepare_restart_one(target);
719 if (retval == ERROR_OK)
720 retval = aarch64_do_restart_one(target, mode);
721
722 return retval;
723 }
724
/*
 * Prepare all but the current target for restart: restore each halted
 * member's register context at its current PC and set up its CTI gates
 * so the shared channel 1 restart event will resume it.  *p_first
 * receives the first successfully prepared member (or NULL).
 */
static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;
	uint64_t address;

	foreach_smp_target(head, target->smp_targets) {
		struct target *curr = head->target;

		/* skip calling target */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_HALTED)
			continue;

		/* resume at current address, not in step mode */
		retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
		if (retval == ERROR_OK)
			retval = aarch64_prepare_restart_one(curr);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to restore target %s", target_name(curr));
			break;
		}
		/* remember the first valid target in the group */
		if (!first)
			first = curr;
	}

	if (p_first)
		*p_first = first;

	return retval;
}
764
765
/* Restart all other members of the SMP group while the calling target
 * is being single-stepped: prepare them, kick the first one (the CTI
 * channel 1 event propagates to the rest), then poll until every member
 * either resumed or halted again, retriggering restarts as needed. */
static int aarch64_step_restart_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("%s", target_name(target));

	/* handle_breakpoints = 0: stepping does not re-arm breakpoints here */
	retval = aarch64_prep_restart_smp(target, 0, &first);
	if (retval != ERROR_OK)
		return retval;

	if (first)
		retval = aarch64_do_restart_one(first, RESTART_LAZY);
	if (retval != ERROR_OK) {
		LOG_DEBUG("error restarting target %s", target_name(first));
		return retval;
	}

	int64_t then = timeval_ms();
	for (;;) {
		struct target *curr = target;
		bool all_resumed = true;

		foreach_smp_target(head, target->smp_targets) {
			uint32_t prsr;
			int resumed;

			curr = head->target;

			if (curr == target)
				continue;

			if (!target_was_examined(curr))
				continue;

			/* a member that shows neither SDR nor HALT has simply
			 * not reacted yet; only "still halted" counts as
			 * not-resumed here */
			retval = aarch64_check_state_one(curr,
						PRSR_SDR, PRSR_SDR, &resumed, &prsr);
			if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
				all_resumed = false;
				break;
			}

			if (curr->state != TARGET_RUNNING) {
				curr->state = TARGET_RUNNING;
				curr->debug_reason = DBG_REASON_NOTHALTED;
				target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
			}
		}

		if (all_resumed)
			break;

		if (timeval_ms() > then + 1000) {
			LOG_ERROR("%s: timeout waiting for target resume", __func__);
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit resume for the second cluster.
		 */
		retval = aarch64_do_restart_one(curr, RESTART_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
838
/* Resume execution.  In an SMP group the whole group is prepared first
 * so a single CTI restart event resumes every member; the calling
 * target is then restored/restarted synchronously and the group is
 * polled until all members left debug state.
 * current = 1: resume at the saved PC; otherwise resume at address. */
static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	struct armv8_common *armv8 = target_to_armv8(target);
	armv8->last_run_control_op = ARMV8_RUNCONTROL_RESUME;

	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	/*
	 * If this target is part of a SMP group, prepare the others
	 * targets for resuming. This involves restoring the complete
	 * target register context and setting up CTI gates to accept
	 * resume events from the trigger matrix.
	 */
	if (target->smp) {
		retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
		if (retval != ERROR_OK)
			return retval;
	}

	/* all targets prepared, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
				     debug_execution);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_SYNC);
	if (retval != ERROR_OK)
		return retval;

	if (target->smp) {
		/* wait for every group member to leave debug state */
		int64_t then = timeval_ms();
		for (;;) {
			struct target *curr = target;
			struct target_list *head;
			bool all_resumed = true;

			foreach_smp_target(head, target->smp_targets) {
				uint32_t prsr;
				int resumed;

				curr = head->target;
				if (curr == target)
					continue;
				if (!target_was_examined(curr))
					continue;

				/* only "still halted" counts as not resumed;
				 * a member showing neither SDR nor HALT just
				 * has not reacted yet */
				retval = aarch64_check_state_one(curr,
							PRSR_SDR, PRSR_SDR, &resumed, &prsr);
				if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
					all_resumed = false;
					break;
				}

				if (curr->state != TARGET_RUNNING) {
					curr->state = TARGET_RUNNING;
					curr->debug_reason = DBG_REASON_NOTHALTED;
					target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
				}
			}

			if (all_resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}

			/*
			 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
			 * and it looks like the CTI's are not connected by a common
			 * trigger matrix. It seems that we need to halt one core in each
			 * cluster explicitly. So if we find that a core has not halted
			 * yet, we trigger an explicit resume for the second cluster.
			 */
			retval = aarch64_do_restart_one(curr, RESTART_LAZY);
			if (retval != ERROR_OK)
				break;
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}
941
/* Called once when a halt is detected: clear sticky errors, determine
 * the core state from DSCR, close the CTI gate, record the debug reason
 * (and watchpoint address if applicable), then read back the register
 * context and run the post-debug-entry hook. */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	uint32_t dscr;

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval == ERROR_OK)
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);

	/* derive AArch32/AArch64 state from DSCR and select the matching
	 * opcode set and register access functions */
	dpm->dscr = dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* close the CTI gate for all events */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, dscr);

	/* save the memory address that triggered the watchpoint */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;

		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_EDWAR0, &tmp);
		if (retval != ERROR_OK)
			return retval;
		target_addr_t edwar = tmp;

		/* EDWAR[63:32] has unknown content in aarch32 state */
		if (core_state == ARM_STATE_AARCH64) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_EDWAR1, &tmp);
			if (retval != ERROR_OK)
				return retval;
			edwar |= ((target_addr_t)tmp) << 32;
		}

		armv8->dpm.wp_addr = edwar;
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
1010
/* Post-debug-entry hook: read SCTLR for the exception level the core
 * halted in, cache it, and update the MMU/cache-enabled flags derived
 * from its M, C and I bits.  Also identifies the cache geometry and
 * MPIDR on the first halt. */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	/* select the SCTLR read instruction matching the current EL */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* SCTLR_EL1 is not accessible from EL0; switch up to EL1h */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_UND:
	case ARM_MODE_SYS:
		/* AArch32 state: SCTLR is CP15 c1,0,c0,0 */
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
			  armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* info == -1 means the cache was not identified yet */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}
	/* Armv8-R has no MMU */
	if (armv8->is_armv8r) {
		armv8->armv8_mmu.mmu_enabled = 0;
	} else {
		armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;	/* SCTLR.M */
	}
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
		(aarch64->system_control_reg & 0x4U) ? 1 : 0;		/* SCTLR.C */
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
		(aarch64->system_control_reg & 0x1000U) ? 1 : 0;	/* SCTLR.I */
	return ERROR_OK;
}
1081
1082 /*
1083 * single-step a target
1084 */
1085 static int aarch64_step(struct target *target, int current, target_addr_t address,
1086 int handle_breakpoints)
1087 {
1088 struct armv8_common *armv8 = target_to_armv8(target);
1089 struct aarch64_common *aarch64 = target_to_aarch64(target);
1090 int saved_retval = ERROR_OK;
1091 int retval;
1092 uint32_t edecr;
1093
1094 armv8->last_run_control_op = ARMV8_RUNCONTROL_STEP;
1095
1096 if (target->state != TARGET_HALTED) {
1097 LOG_WARNING("target not halted");
1098 return ERROR_TARGET_NOT_HALTED;
1099 }
1100
1101 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1102 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1103 /* make sure EDECR.SS is not set when restoring the register */
1104
1105 if (retval == ERROR_OK) {
1106 edecr &= ~0x4;
1107 /* set EDECR.SS to enter hardware step mode */
1108 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1109 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1110 }
1111 /* disable interrupts while stepping */
1112 if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
1113 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1114 /* bail out if stepping setup has failed */
1115 if (retval != ERROR_OK)
1116 return retval;
1117
1118 if (target->smp && (current == 1)) {
1119 /*
1120 * isolate current target so that it doesn't get resumed
1121 * together with the others
1122 */
1123 retval = arm_cti_gate_channel(armv8->cti, 1);
1124 /* resume all other targets in the group */
1125 if (retval == ERROR_OK)
1126 retval = aarch64_step_restart_smp(target);
1127 if (retval != ERROR_OK) {
1128 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1129 return retval;
1130 }
1131 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1132 }
1133
1134 /* all other targets running, restore and restart the current target */
1135 retval = aarch64_restore_one(target, current, &address, 0, 0);
1136 if (retval == ERROR_OK)
1137 retval = aarch64_restart_one(target, RESTART_LAZY);
1138
1139 if (retval != ERROR_OK)
1140 return retval;
1141
1142 LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
1143 if (!handle_breakpoints)
1144 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1145
1146 int64_t then = timeval_ms();
1147 for (;;) {
1148 int stepped;
1149 uint32_t prsr;
1150
1151 retval = aarch64_check_state_one(target,
1152 PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
1153 if (retval != ERROR_OK || stepped)
1154 break;
1155
1156 if (timeval_ms() > then + 100) {
1157 LOG_ERROR("timeout waiting for target %s halt after step",
1158 target_name(target));
1159 retval = ERROR_TARGET_TIMEOUT;
1160 break;
1161 }
1162 }
1163
1164 /*
1165 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1166 * causes a timeout. The core takes the step but doesn't complete it and so
1167 * debug state is never entered. However, you can manually halt the core
1168 * as an external debug even is also a WFI wakeup event.
1169 */
1170 if (retval == ERROR_TARGET_TIMEOUT)
1171 saved_retval = aarch64_halt_one(target, HALT_SYNC);
1172
1173 /* restore EDECR */
1174 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1175 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1176 if (retval != ERROR_OK)
1177 return retval;
1178
1179 /* restore interrupts */
1180 if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
1181 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1182 if (retval != ERROR_OK)
1183 return ERROR_OK;
1184 }
1185
1186 if (saved_retval != ERROR_OK)
1187 return saved_retval;
1188
1189 return ERROR_OK;
1190 }
1191
1192 static int aarch64_restore_context(struct target *target, bool bpwp)
1193 {
1194 struct armv8_common *armv8 = target_to_armv8(target);
1195 struct arm *arm = &armv8->arm;
1196
1197 int retval;
1198
1199 LOG_DEBUG("%s", target_name(target));
1200
1201 if (armv8->pre_restore_context)
1202 armv8->pre_restore_context(target);
1203
1204 retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1205 if (retval == ERROR_OK) {
1206 /* registers are now invalid */
1207 register_cache_invalidate(arm->core_cache);
1208 register_cache_invalidate(arm->core_cache->next);
1209 }
1210
1211 return retval;
1212 }
1213
1214 /*
1215 * Cortex-A8 Breakpoint and watchpoint functions
1216 */
1217
1218 /* Setup hardware Breakpoint Register Pair */
1219 static int aarch64_set_breakpoint(struct target *target,
1220 struct breakpoint *breakpoint, uint8_t matchmode)
1221 {
1222 int retval;
1223 int brp_i = 0;
1224 uint32_t control;
1225 uint8_t byte_addr_select = 0x0F;
1226 struct aarch64_common *aarch64 = target_to_aarch64(target);
1227 struct armv8_common *armv8 = &aarch64->armv8_common;
1228 struct aarch64_brp *brp_list = aarch64->brp_list;
1229
1230 if (breakpoint->is_set) {
1231 LOG_WARNING("breakpoint already set");
1232 return ERROR_OK;
1233 }
1234
1235 if (breakpoint->type == BKPT_HARD) {
1236 int64_t bpt_value;
1237 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
1238 brp_i++;
1239 if (brp_i >= aarch64->brp_num) {
1240 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1241 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1242 }
1243 breakpoint_hw_set(breakpoint, brp_i);
1244 if (breakpoint->length == 2)
1245 byte_addr_select = (3 << (breakpoint->address & 0x02));
1246 control = ((matchmode & 0x7) << 20)
1247 | (1 << 13)
1248 | (byte_addr_select << 5)
1249 | (3 << 1) | 1;
1250 brp_list[brp_i].used = 1;
1251 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFCULL;
1252 brp_list[brp_i].control = control;
1253 bpt_value = brp_list[brp_i].value;
1254
1255 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1256 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
1257 (uint32_t)(bpt_value & 0xFFFFFFFF));
1258 if (retval != ERROR_OK)
1259 return retval;
1260 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1261 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
1262 (uint32_t)(bpt_value >> 32));
1263 if (retval != ERROR_OK)
1264 return retval;
1265
1266 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1267 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
1268 brp_list[brp_i].control);
1269 if (retval != ERROR_OK)
1270 return retval;
1271 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1272 brp_list[brp_i].control,
1273 brp_list[brp_i].value);
1274
1275 } else if (breakpoint->type == BKPT_SOFT) {
1276 uint32_t opcode;
1277 uint8_t code[4];
1278
1279 if (armv8_dpm_get_core_state(&armv8->dpm) == ARM_STATE_AARCH64) {
1280 opcode = ARMV8_HLT(11);
1281
1282 if (breakpoint->length != 4)
1283 LOG_ERROR("bug: breakpoint length should be 4 in AArch64 mode");
1284 } else {
1285 /**
1286 * core_state is ARM_STATE_ARM
1287 * in that case the opcode depends on breakpoint length:
1288 * - if length == 4 => A32 opcode
1289 * - if length == 2 => T32 opcode
1290 * - if length == 3 => T32 opcode (refer to gdb doc : ARM-Breakpoint-Kinds)
1291 * in that case the length should be changed from 3 to 4 bytes
1292 **/
1293 opcode = (breakpoint->length == 4) ? ARMV8_HLT_A1(11) :
1294 (uint32_t) (ARMV8_HLT_T1(11) | ARMV8_HLT_T1(11) << 16);
1295
1296 if (breakpoint->length == 3)
1297 breakpoint->length = 4;
1298 }
1299
1300 buf_set_u32(code, 0, 32, opcode);
1301
1302 retval = target_read_memory(target,
1303 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1304 breakpoint->length, 1,
1305 breakpoint->orig_instr);
1306 if (retval != ERROR_OK)
1307 return retval;
1308
1309 armv8_cache_d_inner_flush_virt(armv8,
1310 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1311 breakpoint->length);
1312
1313 retval = target_write_memory(target,
1314 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1315 breakpoint->length, 1, code);
1316 if (retval != ERROR_OK)
1317 return retval;
1318
1319 armv8_cache_d_inner_flush_virt(armv8,
1320 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1321 breakpoint->length);
1322
1323 armv8_cache_i_inner_inval_virt(armv8,
1324 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1325 breakpoint->length);
1326
1327 breakpoint->is_set = true;
1328 }
1329
1330 /* Ensure that halting debug mode is enable */
1331 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1332 if (retval != ERROR_OK) {
1333 LOG_DEBUG("Failed to set DSCR.HDE");
1334 return retval;
1335 }
1336
1337 return ERROR_OK;
1338 }
1339
1340 static int aarch64_set_context_breakpoint(struct target *target,
1341 struct breakpoint *breakpoint, uint8_t matchmode)
1342 {
1343 int retval = ERROR_FAIL;
1344 int brp_i = 0;
1345 uint32_t control;
1346 uint8_t byte_addr_select = 0x0F;
1347 struct aarch64_common *aarch64 = target_to_aarch64(target);
1348 struct armv8_common *armv8 = &aarch64->armv8_common;
1349 struct aarch64_brp *brp_list = aarch64->brp_list;
1350
1351 if (breakpoint->is_set) {
1352 LOG_WARNING("breakpoint already set");
1353 return retval;
1354 }
1355 /*check available context BRPs*/
1356 while ((brp_list[brp_i].used ||
1357 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
1358 brp_i++;
1359
1360 if (brp_i >= aarch64->brp_num) {
1361 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1362 return ERROR_FAIL;
1363 }
1364
1365 breakpoint_hw_set(breakpoint, brp_i);
1366 control = ((matchmode & 0x7) << 20)
1367 | (1 << 13)
1368 | (byte_addr_select << 5)
1369 | (3 << 1) | 1;
1370 brp_list[brp_i].used = 1;
1371 brp_list[brp_i].value = (breakpoint->asid);
1372 brp_list[brp_i].control = control;
1373 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1374 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
1375 brp_list[brp_i].value);
1376 if (retval != ERROR_OK)
1377 return retval;
1378 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1379 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
1380 brp_list[brp_i].control);
1381 if (retval != ERROR_OK)
1382 return retval;
1383 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1384 brp_list[brp_i].control,
1385 brp_list[brp_i].value);
1386 return ERROR_OK;
1387
1388 }
1389
1390 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1391 {
1392 int retval = ERROR_FAIL;
1393 int brp_1 = 0; /* holds the contextID pair */
1394 int brp_2 = 0; /* holds the IVA pair */
1395 uint32_t control_ctx, control_iva;
1396 uint8_t ctx_byte_addr_select = 0x0F;
1397 uint8_t iva_byte_addr_select = 0x0F;
1398 uint8_t ctx_machmode = 0x03;
1399 uint8_t iva_machmode = 0x01;
1400 struct aarch64_common *aarch64 = target_to_aarch64(target);
1401 struct armv8_common *armv8 = &aarch64->armv8_common;
1402 struct aarch64_brp *brp_list = aarch64->brp_list;
1403
1404 if (breakpoint->is_set) {
1405 LOG_WARNING("breakpoint already set");
1406 return retval;
1407 }
1408 /*check available context BRPs*/
1409 while ((brp_list[brp_1].used ||
1410 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1411 brp_1++;
1412
1413 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1414 if (brp_1 >= aarch64->brp_num) {
1415 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1416 return ERROR_FAIL;
1417 }
1418
1419 while ((brp_list[brp_2].used ||
1420 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1421 brp_2++;
1422
1423 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1424 if (brp_2 >= aarch64->brp_num) {
1425 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1426 return ERROR_FAIL;
1427 }
1428
1429 breakpoint_hw_set(breakpoint, brp_1);
1430 breakpoint->linked_brp = brp_2;
1431 control_ctx = ((ctx_machmode & 0x7) << 20)
1432 | (brp_2 << 16)
1433 | (0 << 14)
1434 | (ctx_byte_addr_select << 5)
1435 | (3 << 1) | 1;
1436 brp_list[brp_1].used = 1;
1437 brp_list[brp_1].value = (breakpoint->asid);
1438 brp_list[brp_1].control = control_ctx;
1439 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1440 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].brpn,
1441 brp_list[brp_1].value);
1442 if (retval != ERROR_OK)
1443 return retval;
1444 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1445 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].brpn,
1446 brp_list[brp_1].control);
1447 if (retval != ERROR_OK)
1448 return retval;
1449
1450 control_iva = ((iva_machmode & 0x7) << 20)
1451 | (brp_1 << 16)
1452 | (1 << 13)
1453 | (iva_byte_addr_select << 5)
1454 | (3 << 1) | 1;
1455 brp_list[brp_2].used = 1;
1456 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFCULL;
1457 brp_list[brp_2].control = control_iva;
1458 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1459 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].brpn,
1460 brp_list[brp_2].value & 0xFFFFFFFF);
1461 if (retval != ERROR_OK)
1462 return retval;
1463 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1464 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].brpn,
1465 brp_list[brp_2].value >> 32);
1466 if (retval != ERROR_OK)
1467 return retval;
1468 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1469 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].brpn,
1470 brp_list[brp_2].control);
1471 if (retval != ERROR_OK)
1472 return retval;
1473
1474 return ERROR_OK;
1475 }
1476
/* Remove a breakpoint previously installed by aarch64_set_breakpoint()
 * or its context/hybrid variants: disable and clear the BRP(s) for a
 * hardware breakpoint, or restore the saved opcode for a soft one. */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->is_set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* address and ASID both non-zero means a hybrid breakpoint:
		 * the primary (context) BRP and the linked IVA BRP must both
		 * be released */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->number;
			int brp_j = breakpoint->linked_brp;
			if (brp_i >= aarch64->brp_num) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* disable via BCR first, then clear both BVR words;
			 * the high-word write reuses (uint32_t)value, which
			 * equals value >> 32 here since value was zeroed */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			/* same disable-and-clear sequence for the linked BRP */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].brpn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].brpn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].brpn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_brp = 0;
			breakpoint->is_set = false;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: release a single BRP */
			int brp_i = breakpoint->number;
			if (brp_i >= aarch64->brp_num) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->is_set = false;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* keep D-cache and I-cache coherent with the restored opcode */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
				breakpoint->length);
	}
	breakpoint->is_set = false;

	return ERROR_OK;
}
1609
1610 static int aarch64_add_breakpoint(struct target *target,
1611 struct breakpoint *breakpoint)
1612 {
1613 struct aarch64_common *aarch64 = target_to_aarch64(target);
1614
1615 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1616 LOG_INFO("no hardware breakpoint available");
1617 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1618 }
1619
1620 if (breakpoint->type == BKPT_HARD)
1621 aarch64->brp_num_available--;
1622
1623 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1624 }
1625
1626 static int aarch64_add_context_breakpoint(struct target *target,
1627 struct breakpoint *breakpoint)
1628 {
1629 struct aarch64_common *aarch64 = target_to_aarch64(target);
1630
1631 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1632 LOG_INFO("no hardware breakpoint available");
1633 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1634 }
1635
1636 if (breakpoint->type == BKPT_HARD)
1637 aarch64->brp_num_available--;
1638
1639 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1640 }
1641
1642 static int aarch64_add_hybrid_breakpoint(struct target *target,
1643 struct breakpoint *breakpoint)
1644 {
1645 struct aarch64_common *aarch64 = target_to_aarch64(target);
1646
1647 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1648 LOG_INFO("no hardware breakpoint available");
1649 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1650 }
1651
1652 if (breakpoint->type == BKPT_HARD)
1653 aarch64->brp_num_available--;
1654
1655 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1656 }
1657
1658 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1659 {
1660 struct aarch64_common *aarch64 = target_to_aarch64(target);
1661
1662 #if 0
1663 /* It is perfectly possible to remove breakpoints while the target is running */
1664 if (target->state != TARGET_HALTED) {
1665 LOG_WARNING("target not halted");
1666 return ERROR_TARGET_NOT_HALTED;
1667 }
1668 #endif
1669
1670 if (breakpoint->is_set) {
1671 aarch64_unset_breakpoint(target, breakpoint);
1672 if (breakpoint->type == BKPT_HARD)
1673 aarch64->brp_num_available++;
1674 }
1675
1676 return ERROR_OK;
1677 }
1678
1679 /* Setup hardware Watchpoint Register Pair */
1680 static int aarch64_set_watchpoint(struct target *target,
1681 struct watchpoint *watchpoint)
1682 {
1683 int retval;
1684 int wp_i = 0;
1685 uint32_t control, offset, length;
1686 struct aarch64_common *aarch64 = target_to_aarch64(target);
1687 struct armv8_common *armv8 = &aarch64->armv8_common;
1688 struct aarch64_brp *wp_list = aarch64->wp_list;
1689
1690 if (watchpoint->is_set) {
1691 LOG_WARNING("watchpoint already set");
1692 return ERROR_OK;
1693 }
1694
1695 while (wp_list[wp_i].used && (wp_i < aarch64->wp_num))
1696 wp_i++;
1697 if (wp_i >= aarch64->wp_num) {
1698 LOG_ERROR("ERROR Can not find free Watchpoint Register Pair");
1699 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1700 }
1701
1702 control = (1 << 0) /* enable */
1703 | (3 << 1) /* both user and privileged access */
1704 | (1 << 13); /* higher mode control */
1705
1706 switch (watchpoint->rw) {
1707 case WPT_READ:
1708 control |= 1 << 3;
1709 break;
1710 case WPT_WRITE:
1711 control |= 2 << 3;
1712 break;
1713 case WPT_ACCESS:
1714 control |= 3 << 3;
1715 break;
1716 }
1717
1718 /* Match up to 8 bytes. */
1719 offset = watchpoint->address & 7;
1720 length = watchpoint->length;
1721 if (offset + length > sizeof(uint64_t)) {
1722 length = sizeof(uint64_t) - offset;
1723 LOG_WARNING("Adjust watchpoint match inside 8-byte boundary");
1724 }
1725 for (; length > 0; offset++, length--)
1726 control |= (1 << offset) << 5;
1727
1728 wp_list[wp_i].value = watchpoint->address & 0xFFFFFFFFFFFFFFF8ULL;
1729 wp_list[wp_i].control = control;
1730
1731 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1732 + CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].brpn,
1733 (uint32_t)(wp_list[wp_i].value & 0xFFFFFFFF));
1734 if (retval != ERROR_OK)
1735 return retval;
1736 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1737 + CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].brpn,
1738 (uint32_t)(wp_list[wp_i].value >> 32));
1739 if (retval != ERROR_OK)
1740 return retval;
1741
1742 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1743 + CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].brpn,
1744 control);
1745 if (retval != ERROR_OK)
1746 return retval;
1747 LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, wp_i,
1748 wp_list[wp_i].control, wp_list[wp_i].value);
1749
1750 /* Ensure that halting debug mode is enable */
1751 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1752 if (retval != ERROR_OK) {
1753 LOG_DEBUG("Failed to set DSCR.HDE");
1754 return retval;
1755 }
1756
1757 wp_list[wp_i].used = 1;
1758 watchpoint_set(watchpoint, wp_i);
1759
1760 return ERROR_OK;
1761 }
1762
/* Clear hardware Watchpoint Register Pair: release the bookkeeping
 * entry, then disable the WRP in hardware (WCR first, then both WVR
 * words — all values were just zeroed, so the truncated high-word write
 * below is equivalent to value >> 32). */
static int aarch64_unset_watchpoint(struct target *target,
	struct watchpoint *watchpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *wp_list = aarch64->wp_list;

	if (!watchpoint->is_set) {
		LOG_WARNING("watchpoint not set");
		return ERROR_OK;
	}

	int wp_i = watchpoint->number;
	if (wp_i >= aarch64->wp_num) {
		LOG_DEBUG("Invalid WP number in watchpoint");
		return ERROR_OK;
	}
	LOG_DEBUG("rwp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, wp_i,
		wp_list[wp_i].control, wp_list[wp_i].value);
	wp_list[wp_i].used = 0;
	wp_list[wp_i].value = 0;
	wp_list[wp_i].control = 0;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].brpn,
			wp_list[wp_i].control);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].brpn,
			wp_list[wp_i].value);
	if (retval != ERROR_OK)
		return retval;

	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].brpn,
			(uint32_t)wp_list[wp_i].value);
	if (retval != ERROR_OK)
		return retval;
	watchpoint->is_set = false;

	return ERROR_OK;
}
1807
1808 static int aarch64_add_watchpoint(struct target *target,
1809 struct watchpoint *watchpoint)
1810 {
1811 int retval;
1812 struct aarch64_common *aarch64 = target_to_aarch64(target);
1813
1814 if (aarch64->wp_num_available < 1) {
1815 LOG_INFO("no hardware watchpoint available");
1816 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1817 }
1818
1819 retval = aarch64_set_watchpoint(target, watchpoint);
1820 if (retval == ERROR_OK)
1821 aarch64->wp_num_available--;
1822
1823 return retval;
1824 }
1825
1826 static int aarch64_remove_watchpoint(struct target *target,
1827 struct watchpoint *watchpoint)
1828 {
1829 struct aarch64_common *aarch64 = target_to_aarch64(target);
1830
1831 if (watchpoint->is_set) {
1832 aarch64_unset_watchpoint(target, watchpoint);
1833 aarch64->wp_num_available++;
1834 }
1835
1836 return ERROR_OK;
1837 }
1838
1839 /**
1840 * find out which watchpoint hits
1841 * get exception address and compare the address to watchpoints
1842 */
1843 static int aarch64_hit_watchpoint(struct target *target,
1844 struct watchpoint **hit_watchpoint)
1845 {
1846 if (target->debug_reason != DBG_REASON_WATCHPOINT)
1847 return ERROR_FAIL;
1848
1849 struct armv8_common *armv8 = target_to_armv8(target);
1850
1851 target_addr_t exception_address;
1852 struct watchpoint *wp;
1853
1854 exception_address = armv8->dpm.wp_addr;
1855
1856 if (exception_address == 0xFFFFFFFF)
1857 return ERROR_FAIL;
1858
1859 for (wp = target->watchpoints; wp; wp = wp->next)
1860 if (exception_address >= wp->address && exception_address < (wp->address + wp->length)) {
1861 *hit_watchpoint = wp;
1862 return ERROR_OK;
1863 }
1864
1865 return ERROR_FAIL;
1866 }
1867
1868 /*
1869 * Cortex-A8 Reset functions
1870 */
1871
1872 static int aarch64_enable_reset_catch(struct target *target, bool enable)
1873 {
1874 struct armv8_common *armv8 = target_to_armv8(target);
1875 uint32_t edecr;
1876 int retval;
1877
1878 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1879 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1880 LOG_DEBUG("EDECR = 0x%08" PRIx32 ", enable=%d", edecr, enable);
1881 if (retval != ERROR_OK)
1882 return retval;
1883
1884 if (enable)
1885 edecr |= ECR_RCE;
1886 else
1887 edecr &= ~ECR_RCE;
1888
1889 return mem_ap_write_atomic_u32(armv8->debug_ap,
1890 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1891 }
1892
1893 static int aarch64_clear_reset_catch(struct target *target)
1894 {
1895 struct armv8_common *armv8 = target_to_armv8(target);
1896 uint32_t edesr;
1897 int retval;
1898 bool was_triggered;
1899
1900 /* check if Reset Catch debug event triggered as expected */
1901 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1902 armv8->debug_base + CPUV8_DBG_EDESR, &edesr);
1903 if (retval != ERROR_OK)
1904 return retval;
1905
1906 was_triggered = !!(edesr & ESR_RC);
1907 LOG_DEBUG("Reset Catch debug event %s",
1908 was_triggered ? "triggered" : "NOT triggered!");
1909
1910 if (was_triggered) {
1911 /* clear pending Reset Catch debug event */
1912 edesr &= ~ESR_RC;
1913 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1914 armv8->debug_base + CPUV8_DBG_EDESR, edesr);
1915 if (retval != ERROR_OK)
1916 return retval;
1917 }
1918
1919 return ERROR_OK;
1920 }
1921
/* Assert reset on the target. Prefers a user-supplied RESET_ASSERT
 * event handler; otherwise pulses SRST, optionally arming the Reset
 * Catch debug event first so the core halts straight out of reset.
 * Fails only when no reset method is available at all. */
static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	enum reset_types reset_config = jtag_get_reset_config();
	int retval;

	LOG_DEBUG(" ");

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (reset_config & RESET_HAS_SRST) {
		bool srst_asserted = false;

		if (target->reset_halt && !(reset_config & RESET_SRST_PULLS_TRST)) {
			if (target_was_examined(target)) {

				if (reset_config & RESET_SRST_NO_GATING) {
					/*
					 * SRST needs to be asserted *before* Reset Catch
					 * debug event can be set up.
					 */
					adapter_assert_reset();
					srst_asserted = true;
				}

				/* make sure to clear all sticky errors
				 * (best-effort: result deliberately not checked) */
				mem_ap_write_atomic_u32(armv8->debug_ap,
						armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);

				/* set up Reset Catch debug event to halt the CPU after reset */
				retval = aarch64_enable_reset_catch(target, true);
				if (retval != ERROR_OK)
					LOG_WARNING("%s: Error enabling Reset Catch debug event; the CPU will not halt immediately after reset!",
							target_name(target));
			} else {
				LOG_WARNING("%s: Target not examined, will not halt immediately after reset!",
						target_name(target));
			}
		}

		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		if (!srst_asserted)
			adapter_assert_reset();
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target)) {
		register_cache_invalidate(armv8->arm.core_cache);
		register_cache_invalidate(armv8->arm.core_cache->next);
	}

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1983
/* Deassert reset: release SRST, re-establish debug access, then — if a
 * reset-halt was requested — tidy up the Reset Catch debug event and
 * make sure the core actually ends up halted. */
static int aarch64_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	adapter_deassert_reset();

	/* an unexamined target has no debug resources to reinitialize */
	if (!target_was_examined(target))
		return ERROR_OK;

	retval = aarch64_init_debug_access(target);
	if (retval != ERROR_OK)
		return retval;

	/* poll once to pick up the post-reset core state */
	retval = aarch64_poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		/* clear pending Reset Catch debug event */
		retval = aarch64_clear_reset_catch(target);
		if (retval != ERROR_OK)
			LOG_WARNING("%s: Clearing Reset Catch debug event failed",
					target_name(target));

		/* disable Reset Catch debug event */
		retval = aarch64_enable_reset_catch(target, false);
		if (retval != ERROR_OK)
			LOG_WARNING("%s: Disabling Reset Catch debug event failed",
					target_name(target));

		/* if Reset Catch did not stop the core, halt it now */
		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
					target_name(target));
			if (target_was_examined(target)) {
				retval = aarch64_halt_one(target, HALT_LAZY);
				if (retval != ERROR_OK)
					return retval;
			} else {
				target->state = TARGET_UNKNOWN;
			}
		}
	}

	return ERROR_OK;
}
2032
/* Write memory through the DCC in normal (non-memory) access mode, one
 * element at a time: each value is placed in DTRRX, transferred into
 * core register 1, then stored with a size-matched STR opcode. The
 * caller (aarch64_write_cpu_memory) has set up the address register.
 * *dscr is kept in sync with the DSCR value written to the target. */
static int aarch64_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

	/* register 1 is clobbered below; mark it dirty so it is restored */
	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode if necessary */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t data, opcode;

		/* write the data to store into DTRRX */
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* move DTRRX into core register 1: MRS in AArch64 state,
		 * MRC p14 in AArch32 state */
		if (arm->core_state == ARM_STATE_AARCH64)
			retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		/* store register 1 to memory with a size-matched opcode */
		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2091
/*
 * Fast path for writing target memory through the DCC: switch the DTR to
 * memory-access mode (EDSCR.MA) so each write to DBGDTRRX is stored by the
 * core at the address held in X0 (algorithm from DDI0487A.g, chapter J9.1).
 * Caller must have loaded X0 with the (word-aligned) destination address;
 * only whole 32-bit words are transferred.
 *
 * @param target running target
 * @param count  number of 32-bit words to write
 * @param buffer source data, count * 4 bytes
 * @param dscr   cached DSCR value; updated to reflect the mode changes
 * @return ERROR_OK on success, error code from the failing AP access otherwise
 */
static int aarch64_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;

	/* X1 is clobbered by the transfer; force it to be restored on exit */
	armv8_reg_current(arm, 1)->dirty = true;

	/* Step 1.d - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;


	/* Step 2.a - Do the write */
	retval = mem_ap_write_buf_noincr(armv8->debug_ap,
					buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.a - Switch DTR mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2124
/*
 * Write target memory through the APB-AP via the core's DCC.
 * Loads the destination address into X0, then dispatches to the fast
 * (memory-mode, word-aligned) or slow (per-element) transfer routine.
 * On any failure the DTR is forced back to normal mode and sticky abort
 * flags in DSCR are checked and handled.
 *
 * @param target  halted target
 * @param address destination address in the current (virtual) address space
 * @param size    element size in bytes (1, 2 or 4)
 * @param count   number of elements
 * @param buffer  source data
 * @return ERROR_OK on success, ERROR_TARGET_NOT_HALTED / ERROR_FAIL / AP error otherwise
 */
static int aarch64_write_cpu_memory(struct target *target,
	uint64_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* write memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRX */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* fast path only works with whole, naturally aligned words */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);

	if (retval != ERROR_OK) {
		/* Unset DTR mode */
		mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2211
2212 static int aarch64_read_cpu_memory_slow(struct target *target,
2213 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2214 {
2215 struct armv8_common *armv8 = target_to_armv8(target);
2216 struct arm_dpm *dpm = &armv8->dpm;
2217 struct arm *arm = &armv8->arm;
2218 int retval;
2219
2220 armv8_reg_current(arm, 1)->dirty = true;
2221
2222 /* change DCC to normal mode (if necessary) */
2223 if (*dscr & DSCR_MA) {
2224 *dscr &= DSCR_MA;
2225 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2226 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2227 if (retval != ERROR_OK)
2228 return retval;
2229 }
2230
2231 while (count) {
2232 uint32_t opcode, data;
2233
2234 if (size == 1)
2235 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
2236 else if (size == 2)
2237 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
2238 else
2239 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
2240 retval = dpm->instr_execute(dpm, opcode);
2241 if (retval != ERROR_OK)
2242 return retval;
2243
2244 if (arm->core_state == ARM_STATE_AARCH64)
2245 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
2246 else
2247 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
2248 if (retval != ERROR_OK)
2249 return retval;
2250
2251 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2252 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
2253 if (retval != ERROR_OK)
2254 return retval;
2255
2256 if (size == 1)
2257 *buffer = (uint8_t)data;
2258 else if (size == 2)
2259 target_buffer_set_u16(target, buffer, (uint16_t)data);
2260 else
2261 target_buffer_set_u32(target, buffer, data);
2262
2263 /* Advance */
2264 buffer += size;
2265 --count;
2266 }
2267
2268 return ERROR_OK;
2269 }
2270
/*
 * Fast path for reading target memory through the DCC: switch the DTR to
 * memory-access mode (EDSCR.MA) so each read of DBGDTRTX makes the core
 * reload it from [X0], X0 advancing by 4 per read (algorithm from
 * DDI0487A.g, chapter J9.1).  Caller must have loaded X0 with the
 * (word-aligned) source address; only whole 32-bit words are transferred.
 *
 * @param target running target
 * @param count  number of 32-bit words to read
 * @param buffer destination, count * 4 bytes
 * @param dscr   cached DSCR value; updated to reflect the mode changes
 * @return ERROR_OK on success, error code from the failing AP access otherwise
 */
static int aarch64_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t value;

	/* Mark X1 as dirty */
	armv8_reg_current(arm, 1)->dirty = true;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
	} else {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
	}

	if (retval != ERROR_OK)
		return retval;

	/* Step 1.e - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 1.f - read DBGDTRTX and discard the value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	/* the final word is read after leaving memory mode, see step 3.b */
	count--;
	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	if (count) {
		/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
		 * increments X0 by 4. */
		retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
									armv8->debug_base + CPUV8_DBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Step 3.a - set DTR access mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	target_buffer_set_u32(target, buffer + count * 4, value);
	return retval;
}
2339
/*
 * Read target memory through the APB-AP via the core's DCC.
 * Loads the source address into X0, then dispatches to the fast
 * (memory-mode, word-aligned) or slow (per-element) transfer routine.
 * Afterwards the DTR mode is restored and sticky abort flags in DSCR
 * are checked and handled.
 *
 * @param target  halted target
 * @param address source address in the current (virtual) address space
 * @param size    element size in bytes (1, 2 or 4)
 * @param count   number of elements
 * @param buffer  destination, count * size bytes
 * @return ERROR_OK on success, ERROR_TARGET_NOT_HALTED / ERROR_FAIL / AP error otherwise
 */
static int aarch64_read_cpu_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
			address, size, count);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* fast path only works with whole, naturally aligned words */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);

	/* make sure memory-access mode is left disabled, even on error */
	if (dscr & DSCR_MA) {
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval != ERROR_OK)
		return retval;

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2430
2431 static int aarch64_read_phys_memory(struct target *target,
2432 target_addr_t address, uint32_t size,
2433 uint32_t count, uint8_t *buffer)
2434 {
2435 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2436
2437 if (count && buffer) {
2438 /* read memory through APB-AP */
2439 retval = aarch64_mmu_modify(target, 0);
2440 if (retval != ERROR_OK)
2441 return retval;
2442 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2443 }
2444 return retval;
2445 }
2446
2447 static int aarch64_read_memory(struct target *target, target_addr_t address,
2448 uint32_t size, uint32_t count, uint8_t *buffer)
2449 {
2450 int mmu_enabled = 0;
2451 int retval;
2452
2453 /* determine if MMU was enabled on target stop */
2454 retval = aarch64_mmu(target, &mmu_enabled);
2455 if (retval != ERROR_OK)
2456 return retval;
2457
2458 if (mmu_enabled) {
2459 /* enable MMU as we could have disabled it for phys access */
2460 retval = aarch64_mmu_modify(target, 1);
2461 if (retval != ERROR_OK)
2462 return retval;
2463 }
2464 return aarch64_read_cpu_memory(target, address, size, count, buffer);
2465 }
2466
2467 static int aarch64_write_phys_memory(struct target *target,
2468 target_addr_t address, uint32_t size,
2469 uint32_t count, const uint8_t *buffer)
2470 {
2471 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2472
2473 if (count && buffer) {
2474 /* write memory through APB-AP */
2475 retval = aarch64_mmu_modify(target, 0);
2476 if (retval != ERROR_OK)
2477 return retval;
2478 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2479 }
2480
2481 return retval;
2482 }
2483
2484 static int aarch64_write_memory(struct target *target, target_addr_t address,
2485 uint32_t size, uint32_t count, const uint8_t *buffer)
2486 {
2487 int mmu_enabled = 0;
2488 int retval;
2489
2490 /* determine if MMU was enabled on target stop */
2491 retval = aarch64_mmu(target, &mmu_enabled);
2492 if (retval != ERROR_OK)
2493 return retval;
2494
2495 if (mmu_enabled) {
2496 /* enable MMU as we could have disabled it for phys access */
2497 retval = aarch64_mmu_modify(target, 1);
2498 if (retval != ERROR_OK)
2499 return retval;
2500 }
2501 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2502 }
2503
2504 static int aarch64_handle_target_request(void *priv)
2505 {
2506 struct target *target = priv;
2507 struct armv8_common *armv8 = target_to_armv8(target);
2508 int retval;
2509
2510 if (!target_was_examined(target))
2511 return ERROR_OK;
2512 if (!target->dbg_msg_enabled)
2513 return ERROR_OK;
2514
2515 if (target->state == TARGET_RUNNING) {
2516 uint32_t request;
2517 uint32_t dscr;
2518 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2519 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2520
2521 /* check if we have data */
2522 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2523 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2524 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2525 if (retval == ERROR_OK) {
2526 target_request(target, request);
2527 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2528 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2529 }
2530 }
2531 }
2532
2533 return ERROR_OK;
2534 }
2535
2536 static int aarch64_examine_first(struct target *target)
2537 {
2538 struct aarch64_common *aarch64 = target_to_aarch64(target);
2539 struct armv8_common *armv8 = &aarch64->armv8_common;
2540 struct adiv5_dap *swjdp = armv8->arm.dap;
2541 struct aarch64_private_config *pc = target->private_config;
2542 int i;
2543 int retval = ERROR_OK;
2544 uint64_t debug, ttypr;
2545 uint32_t cpuid;
2546 uint32_t tmp0, tmp1, tmp2, tmp3;
2547 debug = ttypr = cpuid = 0;
2548
2549 if (!pc)
2550 return ERROR_FAIL;
2551
2552 if (!armv8->debug_ap) {
2553 if (pc->adiv5_config.ap_num == DP_APSEL_INVALID) {
2554 /* Search for the APB-AB */
2555 retval = dap_find_get_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2556 if (retval != ERROR_OK) {
2557 LOG_ERROR("Could not find APB-AP for debug access");
2558 return retval;
2559 }
2560 } else {
2561 armv8->debug_ap = dap_get_ap(swjdp, pc->adiv5_config.ap_num);
2562 if (!armv8->debug_ap) {
2563 LOG_ERROR("Cannot get AP");
2564 return ERROR_FAIL;
2565 }
2566 }
2567 }
2568
2569 retval = mem_ap_init(armv8->debug_ap);
2570 if (retval != ERROR_OK) {
2571 LOG_ERROR("Could not initialize the APB-AP");
2572 return retval;
2573 }
2574
2575 armv8->debug_ap->memaccess_tck = 10;
2576
2577 if (!target->dbgbase_set) {
2578 /* Lookup Processor DAP */
2579 retval = dap_lookup_cs_component(armv8->debug_ap, ARM_CS_C9_DEVTYPE_CORE_DEBUG,
2580 &armv8->debug_base, target->coreid);
2581 if (retval != ERROR_OK)
2582 return retval;
2583 LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT,
2584 target->coreid, armv8->debug_base);
2585 } else
2586 armv8->debug_base = target->dbgbase;
2587
2588 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2589 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2590 if (retval != ERROR_OK) {
2591 LOG_DEBUG("Examine %s failed", "oslock");
2592 return retval;
2593 }
2594
2595 retval = mem_ap_read_u32(armv8->debug_ap,
2596 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2597 if (retval != ERROR_OK) {
2598 LOG_DEBUG("Examine %s failed", "CPUID");
2599 return retval;
2600 }
2601
2602 retval = mem_ap_read_u32(armv8->debug_ap,
2603 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2604 retval += mem_ap_read_u32(armv8->debug_ap,
2605 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2606 if (retval != ERROR_OK) {
2607 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2608 return retval;
2609 }
2610 retval = mem_ap_read_u32(armv8->debug_ap,
2611 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2612 retval += mem_ap_read_u32(armv8->debug_ap,
2613 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2614 if (retval != ERROR_OK) {
2615 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2616 return retval;
2617 }
2618
2619 retval = dap_run(armv8->debug_ap->dap);
2620 if (retval != ERROR_OK) {
2621 LOG_ERROR("%s: examination failed\n", target_name(target));
2622 return retval;
2623 }
2624
2625 ttypr |= tmp1;
2626 ttypr = (ttypr << 32) | tmp0;
2627 debug |= tmp3;
2628 debug = (debug << 32) | tmp2;
2629
2630 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2631 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2632 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2633
2634 if (!pc->cti) {
2635 LOG_TARGET_ERROR(target, "CTI not specified");
2636 return ERROR_FAIL;
2637 }
2638
2639 armv8->cti = pc->cti;
2640
2641 retval = aarch64_dpm_setup(aarch64, debug);
2642 if (retval != ERROR_OK)
2643 return retval;
2644
2645 /* Setup Breakpoint Register Pairs */
2646 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2647 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2648 aarch64->brp_num_available = aarch64->brp_num;
2649 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2650 for (i = 0; i < aarch64->brp_num; i++) {
2651 aarch64->brp_list[i].used = 0;
2652 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2653 aarch64->brp_list[i].type = BRP_NORMAL;
2654 else
2655 aarch64->brp_list[i].type = BRP_CONTEXT;
2656 aarch64->brp_list[i].value = 0;
2657 aarch64->brp_list[i].control = 0;
2658 aarch64->brp_list[i].brpn = i;
2659 }
2660
2661 /* Setup Watchpoint Register Pairs */
2662 aarch64->wp_num = (uint32_t)((debug >> 20) & 0x0F) + 1;
2663 aarch64->wp_num_available = aarch64->wp_num;
2664 aarch64->wp_list = calloc(aarch64->wp_num, sizeof(struct aarch64_brp));
2665 for (i = 0; i < aarch64->wp_num; i++) {
2666 aarch64->wp_list[i].used = 0;
2667 aarch64->wp_list[i].type = BRP_NORMAL;
2668 aarch64->wp_list[i].value = 0;
2669 aarch64->wp_list[i].control = 0;
2670 aarch64->wp_list[i].brpn = i;
2671 }
2672
2673 LOG_DEBUG("Configured %i hw breakpoints, %i watchpoints",
2674 aarch64->brp_num, aarch64->wp_num);
2675
2676 target->state = TARGET_UNKNOWN;
2677 target->debug_reason = DBG_REASON_NOTHALTED;
2678 aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2679 target_set_examined(target);
2680 return ERROR_OK;
2681 }
2682
2683 static int aarch64_examine(struct target *target)
2684 {
2685 int retval = ERROR_OK;
2686
2687 /* don't re-probe hardware after each reset */
2688 if (!target_was_examined(target))
2689 retval = aarch64_examine_first(target);
2690
2691 /* Configure core debug access */
2692 if (retval == ERROR_OK)
2693 retval = aarch64_init_debug_access(target);
2694
2695 return retval;
2696 }
2697
/*
 * AArch64 target creation and initialization
 */

/*
 * Target init hook.  Most setup happens in aarch64_examine_first();
 * only semihosting support needs to be wired up here.
 */
static int aarch64_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	arm_semihosting_init(target);
	return ERROR_OK;
}
2709
/*
 * Populate the aarch64/armv8 arch info structures, install the
 * arch-specific callbacks, and register the periodic debug-message
 * poller.  Shared by the aarch64 and armv8r target_create paths.
 *
 * @param target  target being created
 * @param aarch64 freshly allocated arch instance (ownership stays with target)
 * @param dap     DAP this core sits behind
 * @return ERROR_OK always
 */
static int aarch64_init_arch_info(struct target *target,
	struct aarch64_common *aarch64, struct adiv5_dap *dap)
{
	struct armv8_common *armv8 = &aarch64->armv8_common;

	/* Setup struct aarch64_common */
	aarch64->common_magic = AARCH64_COMMON_MAGIC;
	armv8->arm.dap = dap;

	/* register arch-specific functions */
	armv8->examine_debug_reason = NULL;
	armv8->post_debug_entry = aarch64_post_debug_entry;
	armv8->pre_restore_context = NULL;
	armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;

	armv8_init_arch_info(target, armv8);
	/* poll for target debug messages once per timer tick */
	target_register_timer_callback(aarch64_handle_target_request, 1,
		TARGET_TIMER_TYPE_PERIODIC, target);

	return ERROR_OK;
}
2731
2732 static int armv8r_target_create(struct target *target, Jim_Interp *interp)
2733 {
2734 struct aarch64_private_config *pc = target->private_config;
2735 struct aarch64_common *aarch64;
2736
2737 if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2738 return ERROR_FAIL;
2739
2740 aarch64 = calloc(1, sizeof(struct aarch64_common));
2741 if (!aarch64) {
2742 LOG_ERROR("Out of memory");
2743 return ERROR_FAIL;
2744 }
2745
2746 aarch64->armv8_common.is_armv8r = true;
2747
2748 return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2749 }
2750
2751 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2752 {
2753 struct aarch64_private_config *pc = target->private_config;
2754 struct aarch64_common *aarch64;
2755
2756 if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2757 return ERROR_FAIL;
2758
2759 aarch64 = calloc(1, sizeof(struct aarch64_common));
2760 if (!aarch64) {
2761 LOG_ERROR("Out of memory");
2762 return ERROR_FAIL;
2763 }
2764
2765 aarch64->armv8_common.is_armv8r = false;
2766
2767 return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2768 }
2769
2770 static void aarch64_deinit_target(struct target *target)
2771 {
2772 struct aarch64_common *aarch64 = target_to_aarch64(target);
2773 struct armv8_common *armv8 = &aarch64->armv8_common;
2774 struct arm_dpm *dpm = &armv8->dpm;
2775
2776 if (armv8->debug_ap)
2777 dap_put_ap(armv8->debug_ap);
2778
2779 armv8_free_reg_cache(target);
2780 free(aarch64->brp_list);
2781 free(dpm->dbp);
2782 free(dpm->dwp);
2783 free(target->private_config);
2784 free(aarch64);
2785 }
2786
2787 static int aarch64_mmu(struct target *target, int *enabled)
2788 {
2789 struct aarch64_common *aarch64 = target_to_aarch64(target);
2790 struct armv8_common *armv8 = &aarch64->armv8_common;
2791 if (target->state != TARGET_HALTED) {
2792 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2793 return ERROR_TARGET_INVALID;
2794 }
2795 if (armv8->is_armv8r)
2796 *enabled = 0;
2797 else
2798 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2799 return ERROR_OK;
2800 }
2801
/*
 * Translate a virtual address to a physical one via the ARMv8 MMU
 * translation helper (final argument enables the "meminfo" display path
 * used for one-shot translations).
 */
static int aarch64_virt2phys(struct target *target, target_addr_t virt,
	target_addr_t *phys)
{
	return armv8_mmu_translate_va_pa(target, virt, phys, 1);
}
2807
/*
 * private target configuration items
 */
enum aarch64_cfg_param {
	CFG_CTI,	/* "-cti <name>": cross-trigger interface for this core */
};

/* name/value table consumed by aarch64_jim_configure() below */
static const struct jim_nvp nvp_config_opts[] = {
	{ .name = "-cti", .value = CFG_CTI },
	{ .name = NULL, .value = -1 }
};
2819
/*
 * Parse target "-configure"/"-cget" options: first delegates the common
 * ADIv5/DAP options, then handles the aarch64-specific "-cti" option.
 * Allocates the private config on first use.
 *
 * @return JIM_OK on success, JIM_ERR on bad input, JIM_CONTINUE if the
 *         topmost option is not ours and should be tried elsewhere
 */
static int aarch64_jim_configure(struct target *target, struct jim_getopt_info *goi)
{
	struct aarch64_private_config *pc;
	struct jim_nvp *n;
	int e;

	pc = (struct aarch64_private_config *)target->private_config;
	if (!pc) {
		/* NOTE(review): calloc result is not checked before the
		 * dereference below - TODO confirm intended OOM policy */
		pc = calloc(1, sizeof(struct aarch64_private_config));
		pc->adiv5_config.ap_num = DP_APSEL_INVALID;
		target->private_config = pc;
	}

	/*
	 * Call adiv5_jim_configure() to parse the common DAP options
	 * It will return JIM_CONTINUE if it didn't find any known
	 * options, JIM_OK if it correctly parsed the topmost option
	 * and JIM_ERR if an error occurred during parameter evaluation.
	 * For JIM_CONTINUE, we check our own params.
	 *
	 * adiv5_jim_configure() assumes 'private_config' to point to
	 * 'struct adiv5_private_config'. Override 'private_config'!
	 */
	target->private_config = &pc->adiv5_config;
	e = adiv5_jim_configure(target, goi);
	target->private_config = pc;
	if (e != JIM_CONTINUE)
		return e;

	/* parse config or cget options ... */
	if (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);

		/* check first if topmost item is for us */
		e = jim_nvp_name2value_obj(goi->interp, nvp_config_opts,
				goi->argv[0], &n);
		if (e != JIM_OK)
			return JIM_CONTINUE;

		/* consume the option name we just matched */
		e = jim_getopt_obj(goi, NULL);
		if (e != JIM_OK)
			return e;

		switch (n->value) {
		case CFG_CTI: {
			if (goi->isconfigure) {
				/* "-cti <name>": look up and bind the CTI instance */
				Jim_Obj *o_cti;
				struct arm_cti *cti;
				e = jim_getopt_obj(goi, &o_cti);
				if (e != JIM_OK)
					return e;
				cti = cti_instance_by_jim_obj(goi->interp, o_cti);
				if (!cti) {
					Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
					return JIM_ERR;
				}
				pc->cti = cti;
			} else {
				/* cget: report the configured CTI name */
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}

				if (!pc || !pc->cti) {
					Jim_SetResultString(goi->interp, "CTI not configured", -1);
					return JIM_ERR;
				}
				Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
			}
			break;
		}

		default:
			return JIM_CONTINUE;
		}
	}

	return JIM_OK;
}
2901
/* "aarch64 cache_info": display cache information of the current target */
COMMAND_HANDLER(aarch64_handle_cache_info_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct armv8_common *armv8 = target_to_armv8(target);

	return armv8_handle_cache_info_command(CMD,
			&armv8->armv8_mmu.armv8_cache);
}
2910
/* "aarch64 dbginit": (re-)initialize core debug access of the current,
 * already examined target */
COMMAND_HANDLER(aarch64_handle_dbginit_command)
{
	struct target *target = get_current_target(CMD_CTX);
	if (!target_was_examined(target)) {
		LOG_ERROR("target not examined yet");
		return ERROR_FAIL;
	}

	return aarch64_init_debug_access(target);
}
2921
2922 COMMAND_HANDLER(aarch64_handle_disassemble_command)
2923 {
2924 struct target *target = get_current_target(CMD_CTX);
2925
2926 if (!target) {
2927 LOG_ERROR("No target selected");
2928 return ERROR_FAIL;
2929 }
2930
2931 struct aarch64_common *aarch64 = target_to_aarch64(target);
2932
2933 if (aarch64->common_magic != AARCH64_COMMON_MAGIC) {
2934 command_print(CMD, "current target isn't an AArch64");
2935 return ERROR_FAIL;
2936 }
2937
2938 int count = 1;
2939 target_addr_t address;
2940
2941 switch (CMD_ARGC) {
2942 case 2:
2943 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
2944 /* FALL THROUGH */
2945 case 1:
2946 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
2947 break;
2948 default:
2949 return ERROR_COMMAND_SYNTAX_ERROR;
2950 }
2951
2952 return a64_disassemble(CMD, target, address, count);
2953 }
2954
2955 COMMAND_HANDLER(aarch64_mask_interrupts_command)
2956 {
2957 struct target *target = get_current_target(CMD_CTX);
2958 struct aarch64_common *aarch64 = target_to_aarch64(target);
2959
2960 static const struct jim_nvp nvp_maskisr_modes[] = {
2961 { .name = "off", .value = AARCH64_ISRMASK_OFF },
2962 { .name = "on", .value = AARCH64_ISRMASK_ON },
2963 { .name = NULL, .value = -1 },
2964 };
2965 const struct jim_nvp *n;
2966
2967 if (CMD_ARGC > 0) {
2968 n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2969 if (!n->name) {
2970 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2971 return ERROR_COMMAND_SYNTAX_ERROR;
2972 }
2973
2974 aarch64->isrmasking_mode = n->value;
2975 }
2976
2977 n = jim_nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
2978 command_print(CMD, "aarch64 interrupt mask %s", n->name);
2979
2980 return ERROR_OK;
2981 }
2982
2983 COMMAND_HANDLER(aarch64_mcrmrc_command)
2984 {
2985 bool is_mcr = false;
2986 unsigned int arg_cnt = 5;
2987
2988 if (!strcmp(CMD_NAME, "mcr")) {
2989 is_mcr = true;
2990 arg_cnt = 6;
2991 }
2992
2993 if (arg_cnt != CMD_ARGC)
2994 return ERROR_COMMAND_SYNTAX_ERROR;
2995
2996 struct target *target = get_current_target(CMD_CTX);
2997 if (!target) {
2998 command_print(CMD, "no current target");
2999 return ERROR_FAIL;
3000 }
3001 if (!target_was_examined(target)) {
3002 command_print(CMD, "%s: not yet examined", target_name(target));
3003 return ERROR_TARGET_NOT_EXAMINED;
3004 }
3005
3006 struct arm *arm = target_to_arm(target);
3007 if (!is_arm(arm)) {
3008 command_print(CMD, "%s: not an ARM", target_name(target));
3009 return ERROR_FAIL;
3010 }
3011
3012 if (target->state != TARGET_HALTED)
3013 return ERROR_TARGET_NOT_HALTED;
3014
3015 if (arm->core_state == ARM_STATE_AARCH64) {
3016 command_print(CMD, "%s: not 32-bit arm target", target_name(target));
3017 return ERROR_FAIL;
3018 }
3019
3020 int cpnum;
3021 uint32_t op1;
3022 uint32_t op2;
3023 uint32_t crn;
3024 uint32_t crm;
3025 uint32_t value;
3026
3027 /* NOTE: parameter sequence matches ARM instruction set usage:
3028 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
3029 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
3030 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
3031 */
3032 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], cpnum);
3033 if (cpnum & ~0xf) {
3034 command_print(CMD, "coprocessor %d out of range", cpnum);
3035 return ERROR_COMMAND_ARGUMENT_INVALID;
3036 }
3037
3038 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], op1);
3039 if (op1 & ~0x7) {
3040 command_print(CMD, "op1 %d out of range", op1);
3041 return ERROR_COMMAND_ARGUMENT_INVALID;
3042 }
3043
3044 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], crn);
3045 if (crn & ~0xf) {
3046 command_print(CMD, "CRn %d out of range", crn);
3047 return ERROR_COMMAND_ARGUMENT_INVALID;
3048 }
3049
3050 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], crm);
3051 if (crm & ~0xf) {
3052 command_print(CMD, "CRm %d out of range", crm);
3053 return ERROR_COMMAND_ARGUMENT_INVALID;
3054 }
3055
3056 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], op2);
3057 if (op2 & ~0x7) {
3058 command_print(CMD, "op2 %d out of range", op2);
3059 return ERROR_COMMAND_ARGUMENT_INVALID;
3060 }
3061
3062 if (is_mcr) {
3063 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[5], value);
3064
3065 /* NOTE: parameters reordered! */
3066 /* ARMV4_5_MCR(cpnum, op1, 0, crn, crm, op2) */
3067 int retval = arm->mcr(target, cpnum, op1, op2, crn, crm, value);
3068 if (retval != ERROR_OK)
3069 return retval;
3070 } else {
3071 value = 0;
3072 /* NOTE: parameters reordered! */
3073 /* ARMV4_5_MRC(cpnum, op1, 0, crn, crm, op2) */
3074 int retval = arm->mrc(target, cpnum, op1, op2, crn, crm, &value);
3075 if (retval != ERROR_OK)
3076 return retval;
3077
3078 command_print(CMD, "0x%" PRIx32, value);
3079 }
3080
3081 return ERROR_OK;
3082 }
3083
/* aarch64-specific exec-mode commands, registered under "aarch64 ..." */
static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "disassemble",
		.handler = aarch64_handle_disassemble_command,
		.mode = COMMAND_EXEC,
		.help = "Disassemble instructions",
		.usage = "address [count]",
	},
	{
		.name = "maskisr",
		.handler = aarch64_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask aarch64 interrupts during single-step",
		.usage = "['on'|'off']",
	},
	{
		.name = "mcr",
		.mode = COMMAND_EXEC,
		.handler = aarch64_mcrmrc_command,
		.help = "write coprocessor register",
		.usage = "cpnum op1 CRn CRm op2 value",
	},
	{
		.name = "mrc",
		.mode = COMMAND_EXEC,
		.handler = aarch64_mcrmrc_command,
		.help = "read coprocessor register",
		.usage = "cpnum op1 CRn CRm op2",
	},
	{
		/* pull in the common SMP sub-commands */
		.chain = smp_command_handlers,
	},


	COMMAND_REGISTRATION_DONE
};
3134
/* top-level command groups exposed by the aarch64 target:
 * "arm" (semihosting), the shared armv8 group, and "aarch64" */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.name = "arm",
		.mode = COMMAND_ANY,
		.help = "ARM Command Group",
		.usage = "",
		.chain = semihosting_common_handlers
	},
	{
		.chain = armv8_command_handlers,
	},
	{
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "Aarch64 command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3155
/*
 * OpenOCD target driver for ARMv8-A (AArch64) cores.
 * Fills in the struct target_type callback table; state/reg access is
 * delegated to the shared armv8_* helpers where available.
 */
struct target_type aarch64_target = {
	.name = "aarch64",

	/* Run-control */
	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = armv8_get_gdb_arch,
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	/* Virtual-address memory access (MMU-aware; see mmu/virt2phys below) */
	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	/* Breakpoints and watchpoints */
	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = aarch64_add_watchpoint,
	.remove_watchpoint = aarch64_remove_watchpoint,
	.hit_watchpoint = aarch64_hit_watchpoint,

	/* Lifecycle and configuration */
	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.target_jim_configure = aarch64_jim_configure,
	.init_target = aarch64_init_target,
	.deinit_target = aarch64_deinit_target,
	.examine = aarch64_examine,

	/* Physical-address access and address translation */
	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};
3196
/*
 * OpenOCD target driver for ARMv8-R cores.  Shares almost all callbacks
 * with aarch64_target above; differences: its own target_create, plain
 * memory accesses routed to the *_phys_ variants, and no mmu/virt2phys
 * callbacks (ARMv8-R uses a protected-memory system rather than a VMSA
 * MMU -- presumably why translation is omitted; confirm against armv8
 * docs if touching this).
 */
struct target_type armv8r_target = {
	.name = "armv8r",

	/* Run-control */
	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = armv8_get_gdb_arch,
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	/* No address translation here: generic memory access goes straight
	 * to the physical accessors. */
	.read_memory = aarch64_read_phys_memory,
	.write_memory = aarch64_write_phys_memory,

	/* Breakpoints and watchpoints (same implementations as aarch64) */
	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = aarch64_add_watchpoint,
	.remove_watchpoint = aarch64_remove_watchpoint,
	.hit_watchpoint = aarch64_hit_watchpoint,

	/* Lifecycle and configuration */
	.commands = aarch64_command_handlers,
	.target_create = armv8r_target_create,
	.target_jim_configure = aarch64_jim_configure,
	.init_target = aarch64_init_target,
	.deinit_target = aarch64_deinit_target,
	.examine = aarch64_examine,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)