target/smp: use a struct list_head to hold the smp targets
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "a64_disassembler.h"
27 #include "register.h"
28 #include "target_request.h"
29 #include "target_type.h"
30 #include "armv8_opcodes.h"
31 #include "armv8_cache.h"
32 #include "arm_coresight.h"
33 #include "arm_semihosting.h"
34 #include "jtag/interface.h"
35 #include "smp.h"
36 #include <helper/time_support.h>
37
/* How a restart (resume) request is carried out: RESTART_LAZY just
 * triggers the CTI restart event and returns, RESTART_SYNC additionally
 * waits until the PE has actually left debug state. */
enum restart_mode {
	RESTART_LAZY,
	RESTART_SYNC,
};

/* Same distinction for halt requests: HALT_LAZY triggers the halt event
 * only, HALT_SYNC also waits for the PE to report the halted state. */
enum halt_mode {
	HALT_LAZY,
	HALT_SYNC,
};
47
/* Per-target configuration: the generic ADIv5 settings plus the
 * Cross Trigger Interface (CTI) instance associated with this PE. */
struct aarch64_private_config {
	struct adiv5_private_config adiv5_config;
	struct arm_cti *cti;
};
52
/* Forward declarations for functions referenced before their definitions. */
static int aarch64_poll(struct target *target);
static int aarch64_debug_entry(struct target *target);
static int aarch64_restore_context(struct target *target, bool bpwp);
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_mmu(struct target *target, int *enabled);
static int aarch64_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int aarch64_read_cpu_memory(struct target *target,
	uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
69
/*
 * Write the cached system control register (SCTLR) value back to the PE,
 * but only if it differs from the value currently programmed. The
 * MSR/MCR opcode is selected from the current core mode: SCTLR_EL1/2/3
 * in AArch64 state, cp15 c1 in the AArch32 modes. EL0 cannot access
 * SCTLR, so the DPM is temporarily switched to EL1h and restored to the
 * original mode afterwards.
 *
 * Returns ERROR_OK, or an error code from the DPM write.
 */
static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			/* no SCTLR access from EL0: run the write from EL1h */
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
		case ARM_MODE_HYP:
		case ARM_MODE_UND:
		case ARM_MODE_SYS:
			/* AArch32 modes: write cp15 c1 (system control register) */
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
					armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
			return ERROR_FAIL;
		}

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		/* the DPM write clobbers r0 on the PE to transport the value */
		retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}
129
130 /* modify system_control_reg in order to enable or disable mmu for :
131 * - virt2phys address conversion
132 * - read or write memory in phys or virt address */
/* modify system_control_reg in order to enable or disable mmu for :
 * - virt2phys address conversion
 * - read or write memory in phys or virt address
 *
 * Only the cached "current" SCTLR copy is modified and written to the
 * PE; the saved value (system_control_reg) is left untouched so the
 * original state can be restored on resume. Disabling the MMU also
 * disables and flushes the data cache first (bit 0x4 — presumably
 * SCTLR.C, bit 0x1 — SCTLR.M; confirm against the ARMv8 ARM). */
static int aarch64_mmu_modify(struct target *target, int enable)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval = ERROR_OK;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr = 0;

	if (enable) {
		/* if mmu enabled at target stop and mmu not enable */
		if (!(aarch64->system_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
			return ERROR_FAIL;
		}
		if (!(aarch64->system_control_reg_curr & 0x1U))
			aarch64->system_control_reg_curr |= 0x1U;
	} else {
		if (aarch64->system_control_reg_curr & 0x4U) {
			/* data cache is active */
			aarch64->system_control_reg_curr &= ~0x4U;
			/* flush data cache armv8 function to be called */
			if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
				armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
		}
		if ((aarch64->system_control_reg_curr & 0x1U)) {
			aarch64->system_control_reg_curr &= ~0x1U;
		}
	}

	/* pick the SCTLR write opcode for the current mode; EL0 has no
	 * SCTLR access, so temporarily switch the DPM to EL1h */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_UND:
	case ARM_MODE_SYS:
		instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
		break;

	default:
		/* NOTE(review): instr stays 0 here and is still issued via the
		 * DPM below, unlike the sibling function which returns
		 * ERROR_FAIL — confirm this is intentional */
		LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_mode);
		break;
	}
	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
				aarch64->system_control_reg_curr);

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	return retval;
}
204
205 /*
206 * Basic debug access, very low level assumes state is saved
207 */
/*
 * Basic debug access, very low level assumes state is saved
 *
 * Unlocks the OS lock, clears the sticky power-down status and programs
 * the static CTI routing used by this driver (channel 0 = halt request,
 * channel 1 = restart request, all channels gated from the CTM).
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG("%s", target_name(target));

	/* unlock the OS lock to regain access to the debug registers */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = arm_cti_enable(armv8->cti, true);
	/* By default, gate all channel events to and from the CTM */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* output halt requests to PE on channel 0 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	return ERROR_OK;
}
255
256 /* Write to memory mapped registers directly with no cache or mmu handling */
257 static int aarch64_dap_write_memap_register_u32(struct target *target,
258 target_addr_t address,
259 uint32_t value)
260 {
261 int retval;
262 struct armv8_common *armv8 = target_to_armv8(target);
263
264 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
265
266 return retval;
267 }
268
269 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
270 {
271 struct arm_dpm *dpm = &a8->armv8_common.dpm;
272 int retval;
273
274 dpm->arm = &a8->armv8_common.arm;
275 dpm->didr = debug;
276
277 retval = armv8_dpm_setup(dpm);
278 if (retval == ERROR_OK)
279 retval = armv8_dpm_initialize(dpm);
280
281 return retval;
282 }
283
284 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
285 {
286 struct armv8_common *armv8 = target_to_armv8(target);
287 return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
288 }
289
290 static int aarch64_check_state_one(struct target *target,
291 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
292 {
293 struct armv8_common *armv8 = target_to_armv8(target);
294 uint32_t prsr;
295 int retval;
296
297 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
298 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
299 if (retval != ERROR_OK)
300 return retval;
301
302 if (p_prsr)
303 *p_prsr = prsr;
304
305 if (p_result)
306 *p_result = (prsr & mask) == (val & mask);
307
308 return ERROR_OK;
309 }
310
311 static int aarch64_wait_halt_one(struct target *target)
312 {
313 int retval = ERROR_OK;
314 uint32_t prsr;
315
316 int64_t then = timeval_ms();
317 for (;;) {
318 int halted;
319
320 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
321 if (retval != ERROR_OK || halted)
322 break;
323
324 if (timeval_ms() > then + 1000) {
325 retval = ERROR_TARGET_TIMEOUT;
326 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
327 break;
328 }
329 }
330 return retval;
331 }
332
/*
 * Prepare every running, examined PE of the SMP group for halting:
 * enable halting debug mode and open its CTI gate for channel 0 so the
 * halt request propagates through the trigger matrix. With exc_target
 * the calling target itself is skipped. Through p_first (optional) the
 * caller gets the first prepared target, or the calling target itself
 * when nothing else was prepared (or exc_target is false).
 */
static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("target %s exc %i", target_name(target), exc_target);

	foreach_smp_target(head, target->smp_targets) {
		struct target *curr = head->target;
		struct armv8_common *armv8 = target_to_armv8(curr);

		if (exc_target && curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_RUNNING)
			continue;

		/* HACK: mark this target as prepared for halting */
		curr->debug_reason = DBG_REASON_DBGRQ;

		/* open the gate for channel 0 to let HALT requests pass to the CTM */
		retval = arm_cti_ungate_channel(armv8->cti, 0);
		if (retval == ERROR_OK)
			retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
		if (retval != ERROR_OK)
			break;

		LOG_DEBUG("target %s prepared", target_name(curr));

		if (!first)
			first = curr;
	}

	if (p_first) {
		if (exc_target && first)
			*p_first = first;
		else
			*p_first = target;
	}

	return retval;
}
377
378 static int aarch64_halt_one(struct target *target, enum halt_mode mode)
379 {
380 int retval = ERROR_OK;
381 struct armv8_common *armv8 = target_to_armv8(target);
382
383 LOG_DEBUG("%s", target_name(target));
384
385 /* allow Halting Debug Mode */
386 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
387 if (retval != ERROR_OK)
388 return retval;
389
390 /* trigger an event on channel 0, this outputs a halt request to the PE */
391 retval = arm_cti_pulse_channel(armv8->cti, 0);
392 if (retval != ERROR_OK)
393 return retval;
394
395 if (mode == HALT_SYNC) {
396 retval = aarch64_wait_halt_one(target);
397 if (retval != ERROR_OK) {
398 if (retval == ERROR_TARGET_TIMEOUT)
399 LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
400 return retval;
401 }
402 }
403
404 return ERROR_OK;
405 }
406
/*
 * Halt the whole SMP group: prepare all PEs, trigger a halt on one of
 * them (the halt request then fans out through the CTM) and wait up to
 * one second until every examined PE reports halted. With exc_target
 * the calling target is left out; if it then was the only member,
 * nothing is halted at all.
 */
static int aarch64_halt_smp(struct target *target, bool exc_target)
{
	struct target *next = target;
	int retval;

	/* prepare halt on all PEs of the group */
	retval = aarch64_prepare_halt_smp(target, exc_target, &next);

	/* nothing else to halt in the group */
	if (exc_target && next == target)
		return retval;

	/* halt the target PE */
	if (retval == ERROR_OK)
		retval = aarch64_halt_one(next, HALT_LAZY);

	if (retval != ERROR_OK)
		return retval;

	/* wait for all PEs to halt */
	int64_t then = timeval_ms();
	for (;;) {
		bool all_halted = true;
		struct target_list *head;
		struct target *curr;

		foreach_smp_target(head, target->smp_targets) {
			int halted;

			curr = head->target;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
			if (retval != ERROR_OK || !halted) {
				/* curr is left pointing at the offender for the
				 * explicit halt below */
				all_halted = false;
				break;
			}
		}

		if (all_halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}

		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit halt for the second cluster.
		 */
		retval = aarch64_halt_one(curr, HALT_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
469
/*
 * After one target of an SMP group halted, bring the bookkeeping of the
 * whole group up to date: halt any still-running members (when the halt
 * was not requested, i.e. debug_reason == DBG_REASON_NOTHALTED) and
 * poll each member so its state/registers get refreshed. The target
 * that serves the GDB connection is polled last; the calling target is
 * skipped entirely (its state was already updated by the caller).
 */
static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;

	if (debug_reason == DBG_REASON_NOTHALTED) {
		LOG_DEBUG("Halting remaining targets in SMP group");
		aarch64_halt_smp(target, true);
	}

	/* poll all targets in the group, but skip the target that serves GDB */
	foreach_smp_target(head, target->smp_targets) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* remember the gdb_service->target */
		if (curr->gdb_service)
			gdb_target = curr->gdb_service->target;
		/* skip it */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in aarch64_poll() */
		curr->smp = 0;
		aarch64_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target && gdb_target != target)
		aarch64_poll(gdb_target);

	return ERROR_OK;
}
511
512 /*
513 * Aarch64 Run control
514 */
515
/*
 * Poll the PE state. On a running->halted transition, enter debug state
 * (saving registers etc.), propagate the halt to the rest of the SMP
 * group, handle semihosting requests, and fire the appropriate
 * HALTED/DEBUG_HALTED event callbacks. Otherwise just track the
 * RUNNING state.
 */
static int aarch64_poll(struct target *target)
{
	enum target_state prev_target_state;
	int retval = ERROR_OK;
	int halted;

	retval = aarch64_check_state_one(target,
				PRSR_HALT, PRSR_HALT, &halted, NULL);
	if (retval != ERROR_OK)
		return retval;

	if (halted) {
		prev_target_state = target->state;
		if (prev_target_state != TARGET_HALTED) {
			/* remember why we were running; debug_entry overwrites it */
			enum target_debug_reason debug_reason = target->debug_reason;

			/* We have a halting debug event */
			target->state = TARGET_HALTED;
			LOG_DEBUG("Target %s halted", target_name(target));
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp)
				update_halt_gdb(target, debug_reason);

			/* non-zero means a semihosting request was (or failed to be)
			 * handled; don't report a halt event in that case */
			if (arm_semihosting(target, &retval) != 0)
				return retval;

			switch (prev_target_state) {
			case TARGET_RUNNING:
			case TARGET_UNKNOWN:
			case TARGET_RESET:
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
				break;
			case TARGET_DEBUG_RUNNING:
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
				break;
			default:
				break;
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
563
564 static int aarch64_halt(struct target *target)
565 {
566 struct armv8_common *armv8 = target_to_armv8(target);
567 armv8->last_run_control_op = ARMV8_RUNCONTROL_HALT;
568
569 if (target->smp)
570 return aarch64_halt_smp(target, false);
571
572 return aarch64_halt_one(target, HALT_SYNC);
573 }
574
/*
 * Restore a single target's context in preparation for a restart:
 * compute the resume PC (either the current PC or *address, which is
 * updated to the value actually used), fix it up for the core state,
 * restore SCTLR and finally the full register context. Working areas
 * are freed unless this is a debug_execution resume.
 */
static int aarch64_restore_one(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	LOG_DEBUG("%s", target_name(target));

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		/* A32: instructions are word aligned */
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		/* A64: 4-byte alignment over the full 64-bit range */
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	/* mark the cached PC dirty so it gets written back on restore */
	arm->pc->dirty = true;
	arm->pc->valid = true;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
629
/**
 * Prepare a single target for restart (without triggering it yet):
 * sanity-check DSCR, acknowledge any pending CTI halt event, open the
 * CTI gate for restart events (channel 1) while closing it for halt
 * events (channel 0), ensure DSCR.HDE stays set and clear the sticky
 * bits in PRSR so a later PRSR.SDR read reflects only the new restart.
 */
static int aarch64_prepare_restart_one(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dscr;
	uint32_t tmp;

	LOG_DEBUG("%s", target_name(target));

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* warnings only — restart is attempted regardless */
	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* acknowledge a pending CTI halt event */
	retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs. Also close gate for channel 0
	 * to isolate the PE from halt events.
	 */
	if (retval == ERROR_OK)
		retval = arm_cti_ungate_channel(armv8->cti, 1);
	if (retval == ERROR_OK)
		retval = arm_cti_gate_channel(armv8->cti, 0);

	/* make sure that DSCR.HDE is set */
	if (retval == ERROR_OK) {
		dscr |= DSCR_HDE;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval == ERROR_OK) {
		/* clear sticky bits in PRSR, SDR is now 0 */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
	}

	return retval;
}
681
682 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
683 {
684 struct armv8_common *armv8 = target_to_armv8(target);
685 int retval;
686
687 LOG_DEBUG("%s", target_name(target));
688
689 /* trigger an event on channel 1, generates a restart request to the PE */
690 retval = arm_cti_pulse_channel(armv8->cti, 1);
691 if (retval != ERROR_OK)
692 return retval;
693
694 if (mode == RESTART_SYNC) {
695 int64_t then = timeval_ms();
696 for (;;) {
697 int resumed;
698 /*
699 * if PRSR.SDR is set now, the target did restart, even
700 * if it's now already halted again (e.g. due to breakpoint)
701 */
702 retval = aarch64_check_state_one(target,
703 PRSR_SDR, PRSR_SDR, &resumed, NULL);
704 if (retval != ERROR_OK || resumed)
705 break;
706
707 if (timeval_ms() > then + 1000) {
708 LOG_ERROR("%s: Timeout waiting for resume"PRIx32, target_name(target));
709 retval = ERROR_TARGET_TIMEOUT;
710 break;
711 }
712 }
713 }
714
715 if (retval != ERROR_OK)
716 return retval;
717
718 target->debug_reason = DBG_REASON_NOTHALTED;
719 target->state = TARGET_RUNNING;
720
721 return ERROR_OK;
722 }
723
724 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
725 {
726 int retval;
727
728 LOG_DEBUG("%s", target_name(target));
729
730 retval = aarch64_prepare_restart_one(target);
731 if (retval == ERROR_OK)
732 retval = aarch64_do_restart_one(target, mode);
733
734 return retval;
735 }
736
/*
 * prepare all but the current target for restart
 *
 * Every other halted, examined member of the SMP group gets its context
 * restored (resuming at its current PC) and its CTI programmed for a
 * restart. Through p_first (optional) the caller receives the first
 * target that was prepared, or NULL when there was none.
 */
static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;
	uint64_t address;

	foreach_smp_target(head, target->smp_targets) {
		struct target *curr = head->target;

		/* skip calling target */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_HALTED)
			continue;

		/* resume at current address, not in step mode */
		retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
		if (retval == ERROR_OK)
			retval = aarch64_prepare_restart_one(curr);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to restore target %s", target_name(curr));
			break;
		}
		/* remember the first valid target in the group */
		if (!first)
			first = curr;
	}

	if (p_first)
		*p_first = first;

	return retval;
}
776
777
/*
 * Used while single-stepping one core of an SMP group: restart all
 * OTHER members of the group (the stepping target itself stays halted
 * and isolated from the restart channel by the caller). Restarting the
 * first prepared target propagates the restart event through the CTM;
 * the loop then waits up to one second for all others to report
 * PRSR.SDR, explicitly restarting stragglers (see HACK note below).
 */
static int aarch64_step_restart_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prep_restart_smp(target, 0, &first);
	if (retval != ERROR_OK)
		return retval;

	if (first)
		retval = aarch64_do_restart_one(first, RESTART_LAZY);
	if (retval != ERROR_OK) {
		LOG_DEBUG("error restarting target %s", target_name(first));
		return retval;
	}

	int64_t then = timeval_ms();
	for (;;) {
		struct target *curr = target;
		bool all_resumed = true;

		foreach_smp_target(head, target->smp_targets) {
			uint32_t prsr;
			int resumed;

			curr = head->target;

			if (curr == target)
				continue;

			if (!target_was_examined(curr))
				continue;

			/* halted without SDR set means the restart did not happen */
			retval = aarch64_check_state_one(curr,
						PRSR_SDR, PRSR_SDR, &resumed, &prsr);
			if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
				all_resumed = false;
				break;
			}

			if (curr->state != TARGET_RUNNING) {
				curr->state = TARGET_RUNNING;
				curr->debug_reason = DBG_REASON_NOTHALTED;
				target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
			}
		}

		if (all_resumed)
			break;

		if (timeval_ms() > then + 1000) {
			LOG_ERROR("%s: timeout waiting for target resume", __func__);
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit resume for the second cluster.
		 */
		retval = aarch64_do_restart_one(curr, RESTART_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
850
/*
 * target_type resume handler. For an SMP group, all other members are
 * prepared first (context restored, CTI gates opened) so the restart of
 * this target propagates to them via the trigger matrix; a wait loop
 * then confirms every member actually resumed, explicitly restarting
 * stragglers (Hi6220 HACK, see below). Finally the target state is set
 * to RUNNING (or DEBUG_RUNNING for debug_execution) and the matching
 * resume event is fired.
 */
static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	struct armv8_common *armv8 = target_to_armv8(target);
	armv8->last_run_control_op = ARMV8_RUNCONTROL_RESUME;

	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	/*
	 * If this target is part of a SMP group, prepare the others
	 * targets for resuming. This involves restoring the complete
	 * target register context and setting up CTI gates to accept
	 * resume events from the trigger matrix.
	 */
	if (target->smp) {
		retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
		if (retval != ERROR_OK)
			return retval;
	}

	/* all targets prepared, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
				debug_execution);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_SYNC);
	if (retval != ERROR_OK)
		return retval;

	if (target->smp) {
		int64_t then = timeval_ms();
		for (;;) {
			struct target *curr = target;
			struct target_list *head;
			bool all_resumed = true;

			foreach_smp_target(head, target->smp_targets) {
				uint32_t prsr;
				int resumed;

				curr = head->target;
				if (curr == target)
					continue;
				if (!target_was_examined(curr))
					continue;

				/* halted without SDR set means no restart happened */
				retval = aarch64_check_state_one(curr,
							PRSR_SDR, PRSR_SDR, &resumed, &prsr);
				if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
					all_resumed = false;
					break;
				}

				if (curr->state != TARGET_RUNNING) {
					curr->state = TARGET_RUNNING;
					curr->debug_reason = DBG_REASON_NOTHALTED;
					target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
				}
			}

			if (all_resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}

			/*
			 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
			 * and it looks like the CTI's are not connected by a common
			 * trigger matrix. It seems that we need to halt one core in each
			 * cluster explicitly. So if we find that a core has not halted
			 * yet, we trigger an explicit resume for the second cluster.
			 */
			retval = aarch64_do_restart_one(curr, RESTART_LAZY);
			if (retval != ERROR_OK)
				break;
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}
953
/*
 * Enter debug state after a halt: clear sticky errors, read DSCR to
 * determine the core state (AArch32/AArch64) and select matching opcode
 * and register-access backends, close the CTI gates, decode the debug
 * reason from DSCR, capture the watchpoint address (EDWAR) if that was
 * the reason, then read the current register set and run the
 * post_debug_entry hook.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	uint32_t dscr;

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval == ERROR_OK)
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);

	dpm->dscr = dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* close the CTI gate for all events */
	/* NOTE(review): retval is always ERROR_OK here (checked above), so
	 * these two guards are redundant but harmless */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, dscr);

	/* save the memory address that triggered the watchpoint */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;

		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_EDWAR0, &tmp);
		if (retval != ERROR_OK)
			return retval;
		target_addr_t edwar = tmp;

		/* EDWAR[63:32] has unknown content in aarch32 state */
		if (core_state == ARM_STATE_AARCH64) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_EDWAR1, &tmp);
			if (retval != ERROR_OK)
				return retval;
			edwar |= ((target_addr_t)tmp) << 32;
		}

		armv8->dpm.wp_addr = edwar;
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
1022
/*
 * Hook run after debug entry: read the system control register (SCTLR
 * for the current EL, or cp15 c1 in AArch32 modes), cache it, identify
 * the cache geometry / MPIDR on first entry, and derive the MMU and
 * cache enable flags from the SCTLR value (bit 0x1 — MMU, 0x4 — data
 * cache, 0x1000 — instruction cache; presumably SCTLR.M/C/I, confirm
 * against the ARMv8 ARM). EL0 cannot access SCTLR, so the DPM is
 * temporarily switched to EL1h for the read.
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_UND:
	case ARM_MODE_SYS:
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
				armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* identify caches and MPIDR only once per session */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
			(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
			(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	return ERROR_OK;
}
1090
1091 /*
1092 * single-step a target
1093 */
1094 static int aarch64_step(struct target *target, int current, target_addr_t address,
1095 int handle_breakpoints)
1096 {
1097 struct armv8_common *armv8 = target_to_armv8(target);
1098 struct aarch64_common *aarch64 = target_to_aarch64(target);
1099 int saved_retval = ERROR_OK;
1100 int retval;
1101 uint32_t edecr;
1102
1103 armv8->last_run_control_op = ARMV8_RUNCONTROL_STEP;
1104
1105 if (target->state != TARGET_HALTED) {
1106 LOG_WARNING("target not halted");
1107 return ERROR_TARGET_NOT_HALTED;
1108 }
1109
1110 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1111 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1112 /* make sure EDECR.SS is not set when restoring the register */
1113
1114 if (retval == ERROR_OK) {
1115 edecr &= ~0x4;
1116 /* set EDECR.SS to enter hardware step mode */
1117 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1118 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1119 }
1120 /* disable interrupts while stepping */
1121 if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
1122 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1123 /* bail out if stepping setup has failed */
1124 if (retval != ERROR_OK)
1125 return retval;
1126
1127 if (target->smp && (current == 1)) {
1128 /*
1129 * isolate current target so that it doesn't get resumed
1130 * together with the others
1131 */
1132 retval = arm_cti_gate_channel(armv8->cti, 1);
1133 /* resume all other targets in the group */
1134 if (retval == ERROR_OK)
1135 retval = aarch64_step_restart_smp(target);
1136 if (retval != ERROR_OK) {
1137 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1138 return retval;
1139 }
1140 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1141 }
1142
1143 /* all other targets running, restore and restart the current target */
1144 retval = aarch64_restore_one(target, current, &address, 0, 0);
1145 if (retval == ERROR_OK)
1146 retval = aarch64_restart_one(target, RESTART_LAZY);
1147
1148 if (retval != ERROR_OK)
1149 return retval;
1150
1151 LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
1152 if (!handle_breakpoints)
1153 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1154
1155 int64_t then = timeval_ms();
1156 for (;;) {
1157 int stepped;
1158 uint32_t prsr;
1159
1160 retval = aarch64_check_state_one(target,
1161 PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
1162 if (retval != ERROR_OK || stepped)
1163 break;
1164
1165 if (timeval_ms() > then + 100) {
1166 LOG_ERROR("timeout waiting for target %s halt after step",
1167 target_name(target));
1168 retval = ERROR_TARGET_TIMEOUT;
1169 break;
1170 }
1171 }
1172
1173 /*
1174 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1175 * causes a timeout. The core takes the step but doesn't complete it and so
1176 * debug state is never entered. However, you can manually halt the core
1177 * as an external debug even is also a WFI wakeup event.
1178 */
1179 if (retval == ERROR_TARGET_TIMEOUT)
1180 saved_retval = aarch64_halt_one(target, HALT_SYNC);
1181
1182 /* restore EDECR */
1183 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1184 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1185 if (retval != ERROR_OK)
1186 return retval;
1187
1188 /* restore interrupts */
1189 if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
1190 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1191 if (retval != ERROR_OK)
1192 return ERROR_OK;
1193 }
1194
1195 if (saved_retval != ERROR_OK)
1196 return saved_retval;
1197
1198 return ERROR_OK;
1199 }
1200
1201 static int aarch64_restore_context(struct target *target, bool bpwp)
1202 {
1203 struct armv8_common *armv8 = target_to_armv8(target);
1204 struct arm *arm = &armv8->arm;
1205
1206 int retval;
1207
1208 LOG_DEBUG("%s", target_name(target));
1209
1210 if (armv8->pre_restore_context)
1211 armv8->pre_restore_context(target);
1212
1213 retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1214 if (retval == ERROR_OK) {
1215 /* registers are now invalid */
1216 register_cache_invalidate(arm->core_cache);
1217 register_cache_invalidate(arm->core_cache->next);
1218 }
1219
1220 return retval;
1221 }
1222
1223 /*
1224 * Cortex-A8 Breakpoint and watchpoint functions
1225 */
1226
1227 /* Setup hardware Breakpoint Register Pair */
1228 static int aarch64_set_breakpoint(struct target *target,
1229 struct breakpoint *breakpoint, uint8_t matchmode)
1230 {
1231 int retval;
1232 int brp_i = 0;
1233 uint32_t control;
1234 uint8_t byte_addr_select = 0x0F;
1235 struct aarch64_common *aarch64 = target_to_aarch64(target);
1236 struct armv8_common *armv8 = &aarch64->armv8_common;
1237 struct aarch64_brp *brp_list = aarch64->brp_list;
1238
1239 if (breakpoint->set) {
1240 LOG_WARNING("breakpoint already set");
1241 return ERROR_OK;
1242 }
1243
1244 if (breakpoint->type == BKPT_HARD) {
1245 int64_t bpt_value;
1246 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
1247 brp_i++;
1248 if (brp_i >= aarch64->brp_num) {
1249 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1250 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1251 }
1252 breakpoint->set = brp_i + 1;
1253 if (breakpoint->length == 2)
1254 byte_addr_select = (3 << (breakpoint->address & 0x02));
1255 control = ((matchmode & 0x7) << 20)
1256 | (1 << 13)
1257 | (byte_addr_select << 5)
1258 | (3 << 1) | 1;
1259 brp_list[brp_i].used = 1;
1260 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1261 brp_list[brp_i].control = control;
1262 bpt_value = brp_list[brp_i].value;
1263
1264 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1265 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
1266 (uint32_t)(bpt_value & 0xFFFFFFFF));
1267 if (retval != ERROR_OK)
1268 return retval;
1269 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1270 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
1271 (uint32_t)(bpt_value >> 32));
1272 if (retval != ERROR_OK)
1273 return retval;
1274
1275 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1276 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
1277 brp_list[brp_i].control);
1278 if (retval != ERROR_OK)
1279 return retval;
1280 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1281 brp_list[brp_i].control,
1282 brp_list[brp_i].value);
1283
1284 } else if (breakpoint->type == BKPT_SOFT) {
1285 uint32_t opcode;
1286 uint8_t code[4];
1287
1288 if (armv8_dpm_get_core_state(&armv8->dpm) == ARM_STATE_AARCH64) {
1289 opcode = ARMV8_HLT(11);
1290
1291 if (breakpoint->length != 4)
1292 LOG_ERROR("bug: breakpoint length should be 4 in AArch64 mode");
1293 } else {
1294 /**
1295 * core_state is ARM_STATE_ARM
1296 * in that case the opcode depends on breakpoint length:
1297 * - if length == 4 => A32 opcode
1298 * - if length == 2 => T32 opcode
1299 * - if length == 3 => T32 opcode (refer to gdb doc : ARM-Breakpoint-Kinds)
1300 * in that case the length should be changed from 3 to 4 bytes
1301 **/
1302 opcode = (breakpoint->length == 4) ? ARMV8_HLT_A1(11) :
1303 (uint32_t) (ARMV8_HLT_T1(11) | ARMV8_HLT_T1(11) << 16);
1304
1305 if (breakpoint->length == 3)
1306 breakpoint->length = 4;
1307 }
1308
1309 buf_set_u32(code, 0, 32, opcode);
1310
1311 retval = target_read_memory(target,
1312 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1313 breakpoint->length, 1,
1314 breakpoint->orig_instr);
1315 if (retval != ERROR_OK)
1316 return retval;
1317
1318 armv8_cache_d_inner_flush_virt(armv8,
1319 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1320 breakpoint->length);
1321
1322 retval = target_write_memory(target,
1323 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1324 breakpoint->length, 1, code);
1325 if (retval != ERROR_OK)
1326 return retval;
1327
1328 armv8_cache_d_inner_flush_virt(armv8,
1329 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1330 breakpoint->length);
1331
1332 armv8_cache_i_inner_inval_virt(armv8,
1333 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1334 breakpoint->length);
1335
1336 breakpoint->set = 0x11; /* Any nice value but 0 */
1337 }
1338
1339 /* Ensure that halting debug mode is enable */
1340 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1341 if (retval != ERROR_OK) {
1342 LOG_DEBUG("Failed to set DSCR.HDE");
1343 return retval;
1344 }
1345
1346 return ERROR_OK;
1347 }
1348
1349 static int aarch64_set_context_breakpoint(struct target *target,
1350 struct breakpoint *breakpoint, uint8_t matchmode)
1351 {
1352 int retval = ERROR_FAIL;
1353 int brp_i = 0;
1354 uint32_t control;
1355 uint8_t byte_addr_select = 0x0F;
1356 struct aarch64_common *aarch64 = target_to_aarch64(target);
1357 struct armv8_common *armv8 = &aarch64->armv8_common;
1358 struct aarch64_brp *brp_list = aarch64->brp_list;
1359
1360 if (breakpoint->set) {
1361 LOG_WARNING("breakpoint already set");
1362 return retval;
1363 }
1364 /*check available context BRPs*/
1365 while ((brp_list[brp_i].used ||
1366 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
1367 brp_i++;
1368
1369 if (brp_i >= aarch64->brp_num) {
1370 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1371 return ERROR_FAIL;
1372 }
1373
1374 breakpoint->set = brp_i + 1;
1375 control = ((matchmode & 0x7) << 20)
1376 | (1 << 13)
1377 | (byte_addr_select << 5)
1378 | (3 << 1) | 1;
1379 brp_list[brp_i].used = 1;
1380 brp_list[brp_i].value = (breakpoint->asid);
1381 brp_list[brp_i].control = control;
1382 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1383 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
1384 brp_list[brp_i].value);
1385 if (retval != ERROR_OK)
1386 return retval;
1387 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1388 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
1389 brp_list[brp_i].control);
1390 if (retval != ERROR_OK)
1391 return retval;
1392 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1393 brp_list[brp_i].control,
1394 brp_list[brp_i].value);
1395 return ERROR_OK;
1396
1397 }
1398
1399 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1400 {
1401 int retval = ERROR_FAIL;
1402 int brp_1 = 0; /* holds the contextID pair */
1403 int brp_2 = 0; /* holds the IVA pair */
1404 uint32_t control_ctx, control_iva;
1405 uint8_t ctx_byte_addr_select = 0x0F;
1406 uint8_t iva_byte_addr_select = 0x0F;
1407 uint8_t ctx_machmode = 0x03;
1408 uint8_t iva_machmode = 0x01;
1409 struct aarch64_common *aarch64 = target_to_aarch64(target);
1410 struct armv8_common *armv8 = &aarch64->armv8_common;
1411 struct aarch64_brp *brp_list = aarch64->brp_list;
1412
1413 if (breakpoint->set) {
1414 LOG_WARNING("breakpoint already set");
1415 return retval;
1416 }
1417 /*check available context BRPs*/
1418 while ((brp_list[brp_1].used ||
1419 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1420 brp_1++;
1421
1422 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1423 if (brp_1 >= aarch64->brp_num) {
1424 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1425 return ERROR_FAIL;
1426 }
1427
1428 while ((brp_list[brp_2].used ||
1429 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1430 brp_2++;
1431
1432 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1433 if (brp_2 >= aarch64->brp_num) {
1434 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1435 return ERROR_FAIL;
1436 }
1437
1438 breakpoint->set = brp_1 + 1;
1439 breakpoint->linked_brp = brp_2;
1440 control_ctx = ((ctx_machmode & 0x7) << 20)
1441 | (brp_2 << 16)
1442 | (0 << 14)
1443 | (ctx_byte_addr_select << 5)
1444 | (3 << 1) | 1;
1445 brp_list[brp_1].used = 1;
1446 brp_list[brp_1].value = (breakpoint->asid);
1447 brp_list[brp_1].control = control_ctx;
1448 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1449 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].brpn,
1450 brp_list[brp_1].value);
1451 if (retval != ERROR_OK)
1452 return retval;
1453 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1454 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].brpn,
1455 brp_list[brp_1].control);
1456 if (retval != ERROR_OK)
1457 return retval;
1458
1459 control_iva = ((iva_machmode & 0x7) << 20)
1460 | (brp_1 << 16)
1461 | (1 << 13)
1462 | (iva_byte_addr_select << 5)
1463 | (3 << 1) | 1;
1464 brp_list[brp_2].used = 1;
1465 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1466 brp_list[brp_2].control = control_iva;
1467 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1468 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].brpn,
1469 brp_list[brp_2].value & 0xFFFFFFFF);
1470 if (retval != ERROR_OK)
1471 return retval;
1472 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1473 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].brpn,
1474 brp_list[brp_2].value >> 32);
1475 if (retval != ERROR_OK)
1476 return retval;
1477 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1478 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].brpn,
1479 brp_list[brp_2].control);
1480 if (retval != ERROR_OK)
1481 return retval;
1482
1483 return ERROR_OK;
1484 }
1485
/*
 * Remove a previously-set breakpoint.
 *
 * BKPT_HARD: release the BRP entry (entries, for a hybrid breakpoint) and
 * zero the DBGBCR/DBGBVR register pairs. A breakpoint with both a non-zero
 * address and a non-zero asid is treated as hybrid, with the linked IVA
 * entry recorded in breakpoint->linked_brp.
 *
 * BKPT_SOFT: write the saved original instruction back over the HLT opcode,
 * keeping data and instruction caches coherent.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			/* hybrid breakpoint: clear both the context entry and the
			 * linked IVA entry */
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_brp;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* disable via BCR first, then clear both BVR words.
			 * NOTE(review): the BVR high-word writes below reuse the
			 * truncated (uint32_t)value instead of value >> 32; this is
			 * harmless because value was just zeroed, but worth confirming
			 * against the other register-clear paths in this file. */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].brpn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].brpn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].brpn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_brp = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: clear the single BRP entry */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* flush D-cache and invalidate I-cache so the core fetches the
		 * restored instruction */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1618
1619 static int aarch64_add_breakpoint(struct target *target,
1620 struct breakpoint *breakpoint)
1621 {
1622 struct aarch64_common *aarch64 = target_to_aarch64(target);
1623
1624 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1625 LOG_INFO("no hardware breakpoint available");
1626 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1627 }
1628
1629 if (breakpoint->type == BKPT_HARD)
1630 aarch64->brp_num_available--;
1631
1632 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1633 }
1634
1635 static int aarch64_add_context_breakpoint(struct target *target,
1636 struct breakpoint *breakpoint)
1637 {
1638 struct aarch64_common *aarch64 = target_to_aarch64(target);
1639
1640 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1641 LOG_INFO("no hardware breakpoint available");
1642 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1643 }
1644
1645 if (breakpoint->type == BKPT_HARD)
1646 aarch64->brp_num_available--;
1647
1648 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1649 }
1650
1651 static int aarch64_add_hybrid_breakpoint(struct target *target,
1652 struct breakpoint *breakpoint)
1653 {
1654 struct aarch64_common *aarch64 = target_to_aarch64(target);
1655
1656 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1657 LOG_INFO("no hardware breakpoint available");
1658 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1659 }
1660
1661 if (breakpoint->type == BKPT_HARD)
1662 aarch64->brp_num_available--;
1663
1664 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1665 }
1666
1667 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1668 {
1669 struct aarch64_common *aarch64 = target_to_aarch64(target);
1670
1671 #if 0
1672 /* It is perfectly possible to remove breakpoints while the target is running */
1673 if (target->state != TARGET_HALTED) {
1674 LOG_WARNING("target not halted");
1675 return ERROR_TARGET_NOT_HALTED;
1676 }
1677 #endif
1678
1679 if (breakpoint->set) {
1680 aarch64_unset_breakpoint(target, breakpoint);
1681 if (breakpoint->type == BKPT_HARD)
1682 aarch64->brp_num_available++;
1683 }
1684
1685 return ERROR_OK;
1686 }
1687
1688 /* Setup hardware Watchpoint Register Pair */
1689 static int aarch64_set_watchpoint(struct target *target,
1690 struct watchpoint *watchpoint)
1691 {
1692 int retval;
1693 int wp_i = 0;
1694 uint32_t control, offset, length;
1695 struct aarch64_common *aarch64 = target_to_aarch64(target);
1696 struct armv8_common *armv8 = &aarch64->armv8_common;
1697 struct aarch64_brp *wp_list = aarch64->wp_list;
1698
1699 if (watchpoint->set) {
1700 LOG_WARNING("watchpoint already set");
1701 return ERROR_OK;
1702 }
1703
1704 while (wp_list[wp_i].used && (wp_i < aarch64->wp_num))
1705 wp_i++;
1706 if (wp_i >= aarch64->wp_num) {
1707 LOG_ERROR("ERROR Can not find free Watchpoint Register Pair");
1708 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1709 }
1710
1711 control = (1 << 0) /* enable */
1712 | (3 << 1) /* both user and privileged access */
1713 | (1 << 13); /* higher mode control */
1714
1715 switch (watchpoint->rw) {
1716 case WPT_READ:
1717 control |= 1 << 3;
1718 break;
1719 case WPT_WRITE:
1720 control |= 2 << 3;
1721 break;
1722 case WPT_ACCESS:
1723 control |= 3 << 3;
1724 break;
1725 }
1726
1727 /* Match up to 8 bytes. */
1728 offset = watchpoint->address & 7;
1729 length = watchpoint->length;
1730 if (offset + length > sizeof(uint64_t)) {
1731 length = sizeof(uint64_t) - offset;
1732 LOG_WARNING("Adjust watchpoint match inside 8-byte boundary");
1733 }
1734 for (; length > 0; offset++, length--)
1735 control |= (1 << offset) << 5;
1736
1737 wp_list[wp_i].value = watchpoint->address & 0xFFFFFFFFFFFFFFF8ULL;
1738 wp_list[wp_i].control = control;
1739
1740 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1741 + CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].brpn,
1742 (uint32_t)(wp_list[wp_i].value & 0xFFFFFFFF));
1743 if (retval != ERROR_OK)
1744 return retval;
1745 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1746 + CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].brpn,
1747 (uint32_t)(wp_list[wp_i].value >> 32));
1748 if (retval != ERROR_OK)
1749 return retval;
1750
1751 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1752 + CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].brpn,
1753 control);
1754 if (retval != ERROR_OK)
1755 return retval;
1756 LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, wp_i,
1757 wp_list[wp_i].control, wp_list[wp_i].value);
1758
1759 /* Ensure that halting debug mode is enable */
1760 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1761 if (retval != ERROR_OK) {
1762 LOG_DEBUG("Failed to set DSCR.HDE");
1763 return retval;
1764 }
1765
1766 wp_list[wp_i].used = 1;
1767 watchpoint->set = wp_i + 1;
1768
1769 return ERROR_OK;
1770 }
1771
1772 /* Clear hardware Watchpoint Register Pair */
1773 static int aarch64_unset_watchpoint(struct target *target,
1774 struct watchpoint *watchpoint)
1775 {
1776 int retval, wp_i;
1777 struct aarch64_common *aarch64 = target_to_aarch64(target);
1778 struct armv8_common *armv8 = &aarch64->armv8_common;
1779 struct aarch64_brp *wp_list = aarch64->wp_list;
1780
1781 if (!watchpoint->set) {
1782 LOG_WARNING("watchpoint not set");
1783 return ERROR_OK;
1784 }
1785
1786 wp_i = watchpoint->set - 1;
1787 if ((wp_i < 0) || (wp_i >= aarch64->wp_num)) {
1788 LOG_DEBUG("Invalid WP number in watchpoint");
1789 return ERROR_OK;
1790 }
1791 LOG_DEBUG("rwp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, wp_i,
1792 wp_list[wp_i].control, wp_list[wp_i].value);
1793 wp_list[wp_i].used = 0;
1794 wp_list[wp_i].value = 0;
1795 wp_list[wp_i].control = 0;
1796 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1797 + CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].brpn,
1798 wp_list[wp_i].control);
1799 if (retval != ERROR_OK)
1800 return retval;
1801 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1802 + CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].brpn,
1803 wp_list[wp_i].value);
1804 if (retval != ERROR_OK)
1805 return retval;
1806
1807 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1808 + CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].brpn,
1809 (uint32_t)wp_list[wp_i].value);
1810 if (retval != ERROR_OK)
1811 return retval;
1812 watchpoint->set = 0;
1813
1814 return ERROR_OK;
1815 }
1816
1817 static int aarch64_add_watchpoint(struct target *target,
1818 struct watchpoint *watchpoint)
1819 {
1820 int retval;
1821 struct aarch64_common *aarch64 = target_to_aarch64(target);
1822
1823 if (aarch64->wp_num_available < 1) {
1824 LOG_INFO("no hardware watchpoint available");
1825 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1826 }
1827
1828 retval = aarch64_set_watchpoint(target, watchpoint);
1829 if (retval == ERROR_OK)
1830 aarch64->wp_num_available--;
1831
1832 return retval;
1833 }
1834
1835 static int aarch64_remove_watchpoint(struct target *target,
1836 struct watchpoint *watchpoint)
1837 {
1838 struct aarch64_common *aarch64 = target_to_aarch64(target);
1839
1840 if (watchpoint->set) {
1841 aarch64_unset_watchpoint(target, watchpoint);
1842 aarch64->wp_num_available++;
1843 }
1844
1845 return ERROR_OK;
1846 }
1847
1848 /**
1849 * find out which watchpoint hits
1850 * get exception address and compare the address to watchpoints
1851 */
1852 int aarch64_hit_watchpoint(struct target *target,
1853 struct watchpoint **hit_watchpoint)
1854 {
1855 if (target->debug_reason != DBG_REASON_WATCHPOINT)
1856 return ERROR_FAIL;
1857
1858 struct armv8_common *armv8 = target_to_armv8(target);
1859
1860 target_addr_t exception_address;
1861 struct watchpoint *wp;
1862
1863 exception_address = armv8->dpm.wp_addr;
1864
1865 if (exception_address == 0xFFFFFFFF)
1866 return ERROR_FAIL;
1867
1868 for (wp = target->watchpoints; wp; wp = wp->next)
1869 if (exception_address >= wp->address && exception_address < (wp->address + wp->length)) {
1870 *hit_watchpoint = wp;
1871 return ERROR_OK;
1872 }
1873
1874 return ERROR_FAIL;
1875 }
1876
1877 /*
1878 * Cortex-A8 Reset functions
1879 */
1880
1881 static int aarch64_enable_reset_catch(struct target *target, bool enable)
1882 {
1883 struct armv8_common *armv8 = target_to_armv8(target);
1884 uint32_t edecr;
1885 int retval;
1886
1887 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1888 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1889 LOG_DEBUG("EDECR = 0x%08" PRIx32 ", enable=%d", edecr, enable);
1890 if (retval != ERROR_OK)
1891 return retval;
1892
1893 if (enable)
1894 edecr |= ECR_RCE;
1895 else
1896 edecr &= ~ECR_RCE;
1897
1898 return mem_ap_write_atomic_u32(armv8->debug_ap,
1899 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1900 }
1901
1902 static int aarch64_clear_reset_catch(struct target *target)
1903 {
1904 struct armv8_common *armv8 = target_to_armv8(target);
1905 uint32_t edesr;
1906 int retval;
1907 bool was_triggered;
1908
1909 /* check if Reset Catch debug event triggered as expected */
1910 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1911 armv8->debug_base + CPUV8_DBG_EDESR, &edesr);
1912 if (retval != ERROR_OK)
1913 return retval;
1914
1915 was_triggered = !!(edesr & ESR_RC);
1916 LOG_DEBUG("Reset Catch debug event %s",
1917 was_triggered ? "triggered" : "NOT triggered!");
1918
1919 if (was_triggered) {
1920 /* clear pending Reset Catch debug event */
1921 edesr &= ~ESR_RC;
1922 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1923 armv8->debug_base + CPUV8_DBG_EDESR, edesr);
1924 if (retval != ERROR_OK)
1925 return retval;
1926 }
1927
1928 return ERROR_OK;
1929 }
1930
/*
 * Assert reset on the target.
 *
 * Prefers a user-supplied RESET_ASSERT event handler; otherwise asserts
 * SRST through the adapter. For reset-halt, a Reset Catch debug event is
 * armed beforehand so the core halts right out of reset. The ordering
 * inside the RESET_SRST_NO_GATING branch is deliberate: SRST must already
 * be asserted before Reset Catch can be set up on such adapters.
 */
static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	enum reset_types reset_config = jtag_get_reset_config();
	int retval;

	LOG_DEBUG(" ");

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (reset_config & RESET_HAS_SRST) {
		bool srst_asserted = false;

		if (target->reset_halt) {
			if (target_was_examined(target)) {

				if (reset_config & RESET_SRST_NO_GATING) {
					/*
					 * SRST needs to be asserted *before* Reset Catch
					 * debug event can be set up.
					 */
					adapter_assert_reset();
					srst_asserted = true;

					/* make sure to clear all sticky errors;
					 * best-effort: the return value is deliberately
					 * ignored while the core is in reset */
					mem_ap_write_atomic_u32(armv8->debug_ap,
							armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
				}

				/* set up Reset Catch debug event to halt the CPU after reset */
				retval = aarch64_enable_reset_catch(target, true);
				if (retval != ERROR_OK)
					LOG_WARNING("%s: Error enabling Reset Catch debug event; the CPU will not halt immediately after reset!",
							target_name(target));
			} else {
				LOG_WARNING("%s: Target not examined, will not halt immediately after reset!",
						target_name(target));
			}
		}

		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		if (!srst_asserted)
			adapter_assert_reset();
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target)) {
		register_cache_invalidate(armv8->arm.core_cache);
		register_cache_invalidate(armv8->arm.core_cache->next);
	}

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1992
1993 static int aarch64_deassert_reset(struct target *target)
1994 {
1995 int retval;
1996
1997 LOG_DEBUG(" ");
1998
1999 /* be certain SRST is off */
2000 adapter_deassert_reset();
2001
2002 if (!target_was_examined(target))
2003 return ERROR_OK;
2004
2005 retval = aarch64_init_debug_access(target);
2006 if (retval != ERROR_OK)
2007 return retval;
2008
2009 retval = aarch64_poll(target);
2010 if (retval != ERROR_OK)
2011 return retval;
2012
2013 if (target->reset_halt) {
2014 /* clear pending Reset Catch debug event */
2015 retval = aarch64_clear_reset_catch(target);
2016 if (retval != ERROR_OK)
2017 LOG_WARNING("%s: Clearing Reset Catch debug event failed",
2018 target_name(target));
2019
2020 /* disable Reset Catch debug event */
2021 retval = aarch64_enable_reset_catch(target, false);
2022 if (retval != ERROR_OK)
2023 LOG_WARNING("%s: Disabling Reset Catch debug event failed",
2024 target_name(target));
2025
2026 if (target->state != TARGET_HALTED) {
2027 LOG_WARNING("%s: ran after reset and before halt ...",
2028 target_name(target));
2029 retval = target_halt(target);
2030 if (retval != ERROR_OK)
2031 return retval;
2032 }
2033 }
2034
2035 return ERROR_OK;
2036 }
2037
/*
 * Element-at-a-time memory write through the DCC in normal (non-memory)
 * mode: each element is pushed into DTRRX, moved into core register X1,
 * then stored with a post-increment store opcode so the core-side address
 * pointer advances implicitly.
 *
 * @param size   element size in bytes: 1, 2 or 4
 * @param count  number of elements to write
 * @param buffer host data, in target byte order
 * @param dscr   cached DSCR value; updated here if DCC mode is switched
 */
static int aarch64_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

	/* X1 is clobbered as the data register; mark it dirty so it is
	 * written back when leaving debug state */
	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode if necessary */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t data, opcode;

		/* write the data to store into DTRRX */
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* move DTRRX into X1 (A64 MRS or A32 MRC, per core state) */
		if (arm->core_state == ARM_STATE_AARCH64)
			retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		/* store X1 with the size-matched post-increment opcode */
		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2096
2097 static int aarch64_write_cpu_memory_fast(struct target *target,
2098 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2099 {
2100 struct armv8_common *armv8 = target_to_armv8(target);
2101 struct arm *arm = &armv8->arm;
2102 int retval;
2103
2104 armv8_reg_current(arm, 1)->dirty = true;
2105
2106 /* Step 1.d - Change DCC to memory mode */
2107 *dscr |= DSCR_MA;
2108 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2109 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2110 if (retval != ERROR_OK)
2111 return retval;
2112
2113
2114 /* Step 2.a - Do the write */
2115 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
2116 buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
2117 if (retval != ERROR_OK)
2118 return retval;
2119
2120 /* Step 3.a - Switch DTR mode back to Normal mode */
2121 *dscr &= ~DSCR_MA;
2122 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2123 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2124 if (retval != ERROR_OK)
2125 return retval;
2126
2127 return ERROR_OK;
2128 }
2129
/**
 * Write target memory through the core itself via the APB-AP debug port,
 * following the procedure of ARM DDI0487A.g, chapter J9.1: the start
 * address is loaded into X0/R0 and data is pushed through the DCC.
 *
 * @param target  the halted core used for the access
 * @param address target start address
 * @param size    element size in bytes (1, 2 or 4)
 * @param count   number of elements to write
 * @param buffer  source data in target byte order
 * @return ERROR_OK on success, an OpenOCD error code otherwise
 */
static int aarch64_write_cpu_memory(struct target *target,
	uint64_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* write memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Set Normal access mode (clear EDSCR.MA) */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRX */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* fast path requires word-size, word-aligned transfers */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);

	if (retval != ERROR_OK) {
		/* Unset DTR mode (best effort; original error is checked below
		 * via the sticky abort flags) */
		mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	/* Check for sticky abort flags in the DSCR.
	 * NOTE(review): retval is overwritten here, so a transfer failure
	 * that left no sticky flag would be reported as success — confirm
	 * whether that is intended. */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2216
2217 static int aarch64_read_cpu_memory_slow(struct target *target,
2218 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2219 {
2220 struct armv8_common *armv8 = target_to_armv8(target);
2221 struct arm_dpm *dpm = &armv8->dpm;
2222 struct arm *arm = &armv8->arm;
2223 int retval;
2224
2225 armv8_reg_current(arm, 1)->dirty = true;
2226
2227 /* change DCC to normal mode (if necessary) */
2228 if (*dscr & DSCR_MA) {
2229 *dscr &= DSCR_MA;
2230 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2231 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2232 if (retval != ERROR_OK)
2233 return retval;
2234 }
2235
2236 while (count) {
2237 uint32_t opcode, data;
2238
2239 if (size == 1)
2240 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
2241 else if (size == 2)
2242 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
2243 else
2244 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
2245 retval = dpm->instr_execute(dpm, opcode);
2246 if (retval != ERROR_OK)
2247 return retval;
2248
2249 if (arm->core_state == ARM_STATE_AARCH64)
2250 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
2251 else
2252 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
2253 if (retval != ERROR_OK)
2254 return retval;
2255
2256 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2257 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
2258 if (retval != ERROR_OK)
2259 return retval;
2260
2261 if (size == 1)
2262 *buffer = (uint8_t)data;
2263 else if (size == 2)
2264 target_buffer_set_u16(target, buffer, (uint16_t)data);
2265 else
2266 target_buffer_set_u32(target, buffer, data);
2267
2268 /* Advance */
2269 buffer += size;
2270 --count;
2271 }
2272
2273 return ERROR_OK;
2274 }
2275
/**
 * Fast-path CPU-mode memory read using the DCC memory-access mode: each
 * read of DBGDTRTX transfers one 32-bit word from [X0]. Only valid for
 * word-size, word-aligned reads. Step numbers refer to the procedure in
 * DDI0487A.g, chapter J9.1.
 *
 * @param target the halted core used for the access
 * @param count  number of 32-bit words to read
 * @param buffer destination (host) buffer
 * @param dscr   cached DSCR value, kept in sync with the hardware
 */
static int aarch64_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t value;

	/* Mark X1 as dirty */
	armv8_reg_current(arm, 1)->dirty = true;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
	} else {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
	}

	if (retval != ERROR_OK)
		return retval;

	/* Step 1.e - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 1.f - read DBGDTRTX and discard the value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	/* only count-1 words are transferred in memory mode; the last word
	 * is collected after switching back to normal mode (step 3.b),
	 * presumably so the final DTRTX read does not trigger yet another
	 * memory access past the requested range — per the J9.1 procedure */
	count--;
	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	if (count) {
		/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
		 * increments X0 by 4. */
		retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
				armv8->debug_base + CPUV8_DBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Step 3.a - set DTR access mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	/* the last word lands at the end of the destination buffer */
	target_buffer_set_u32(target, buffer + count * 4, value);
	return retval;
}
2344
/**
 * Read target memory through the core itself via the APB-AP debug port,
 * following the procedure of ARM DDI0487A.g, chapter J9.1: the start
 * address is loaded into X0/R0 and data is pulled through the DCC.
 *
 * @param target  the halted core used for the access
 * @param address target start address
 * @param size    element size in bytes (1, 2 or 4)
 * @param count   number of elements to read
 * @param buffer  destination buffer, filled in target byte order
 * @return ERROR_OK on success, an OpenOCD error code otherwise
 */
static int aarch64_read_cpu_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
			address, size, count);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode (clear EDSCR.MA) */
	dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* fast path requires word-size, word-aligned transfers */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);

	/* best-effort: make sure memory-access mode is off again, whether or
	 * not the transfer above succeeded */
	if (dscr & DSCR_MA) {
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval != ERROR_OK)
		return retval;

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2435
2436 static int aarch64_read_phys_memory(struct target *target,
2437 target_addr_t address, uint32_t size,
2438 uint32_t count, uint8_t *buffer)
2439 {
2440 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2441
2442 if (count && buffer) {
2443 /* read memory through APB-AP */
2444 retval = aarch64_mmu_modify(target, 0);
2445 if (retval != ERROR_OK)
2446 return retval;
2447 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2448 }
2449 return retval;
2450 }
2451
2452 static int aarch64_read_memory(struct target *target, target_addr_t address,
2453 uint32_t size, uint32_t count, uint8_t *buffer)
2454 {
2455 int mmu_enabled = 0;
2456 int retval;
2457
2458 /* determine if MMU was enabled on target stop */
2459 retval = aarch64_mmu(target, &mmu_enabled);
2460 if (retval != ERROR_OK)
2461 return retval;
2462
2463 if (mmu_enabled) {
2464 /* enable MMU as we could have disabled it for phys access */
2465 retval = aarch64_mmu_modify(target, 1);
2466 if (retval != ERROR_OK)
2467 return retval;
2468 }
2469 return aarch64_read_cpu_memory(target, address, size, count, buffer);
2470 }
2471
2472 static int aarch64_write_phys_memory(struct target *target,
2473 target_addr_t address, uint32_t size,
2474 uint32_t count, const uint8_t *buffer)
2475 {
2476 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2477
2478 if (count && buffer) {
2479 /* write memory through APB-AP */
2480 retval = aarch64_mmu_modify(target, 0);
2481 if (retval != ERROR_OK)
2482 return retval;
2483 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2484 }
2485
2486 return retval;
2487 }
2488
2489 static int aarch64_write_memory(struct target *target, target_addr_t address,
2490 uint32_t size, uint32_t count, const uint8_t *buffer)
2491 {
2492 int mmu_enabled = 0;
2493 int retval;
2494
2495 /* determine if MMU was enabled on target stop */
2496 retval = aarch64_mmu(target, &mmu_enabled);
2497 if (retval != ERROR_OK)
2498 return retval;
2499
2500 if (mmu_enabled) {
2501 /* enable MMU as we could have disabled it for phys access */
2502 retval = aarch64_mmu_modify(target, 1);
2503 if (retval != ERROR_OK)
2504 return retval;
2505 }
2506 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2507 }
2508
2509 static int aarch64_handle_target_request(void *priv)
2510 {
2511 struct target *target = priv;
2512 struct armv8_common *armv8 = target_to_armv8(target);
2513 int retval;
2514
2515 if (!target_was_examined(target))
2516 return ERROR_OK;
2517 if (!target->dbg_msg_enabled)
2518 return ERROR_OK;
2519
2520 if (target->state == TARGET_RUNNING) {
2521 uint32_t request;
2522 uint32_t dscr;
2523 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2524 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2525
2526 /* check if we have data */
2527 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2528 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2529 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2530 if (retval == ERROR_OK) {
2531 target_request(target, request);
2532 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2533 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2534 }
2535 }
2536 }
2537
2538 return ERROR_OK;
2539 }
2540
2541 static int aarch64_examine_first(struct target *target)
2542 {
2543 struct aarch64_common *aarch64 = target_to_aarch64(target);
2544 struct armv8_common *armv8 = &aarch64->armv8_common;
2545 struct adiv5_dap *swjdp = armv8->arm.dap;
2546 struct aarch64_private_config *pc = target->private_config;
2547 int i;
2548 int retval = ERROR_OK;
2549 uint64_t debug, ttypr;
2550 uint32_t cpuid;
2551 uint32_t tmp0, tmp1, tmp2, tmp3;
2552 debug = ttypr = cpuid = 0;
2553
2554 if (!pc)
2555 return ERROR_FAIL;
2556
2557 if (pc->adiv5_config.ap_num == DP_APSEL_INVALID) {
2558 /* Search for the APB-AB */
2559 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2560 if (retval != ERROR_OK) {
2561 LOG_ERROR("Could not find APB-AP for debug access");
2562 return retval;
2563 }
2564 } else {
2565 armv8->debug_ap = dap_ap(swjdp, pc->adiv5_config.ap_num);
2566 }
2567
2568 retval = mem_ap_init(armv8->debug_ap);
2569 if (retval != ERROR_OK) {
2570 LOG_ERROR("Could not initialize the APB-AP");
2571 return retval;
2572 }
2573
2574 armv8->debug_ap->memaccess_tck = 10;
2575
2576 if (!target->dbgbase_set) {
2577 target_addr_t dbgbase;
2578 /* Get ROM Table base */
2579 uint32_t apid;
2580 int32_t coreidx = target->coreid;
2581 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2582 if (retval != ERROR_OK)
2583 return retval;
2584 /* Lookup Processor DAP */
2585 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, ARM_CS_C9_DEVTYPE_CORE_DEBUG,
2586 &armv8->debug_base, &coreidx);
2587 if (retval != ERROR_OK)
2588 return retval;
2589 LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT
2590 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2591 } else
2592 armv8->debug_base = target->dbgbase;
2593
2594 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2595 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2596 if (retval != ERROR_OK) {
2597 LOG_DEBUG("Examine %s failed", "oslock");
2598 return retval;
2599 }
2600
2601 retval = mem_ap_read_u32(armv8->debug_ap,
2602 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2603 if (retval != ERROR_OK) {
2604 LOG_DEBUG("Examine %s failed", "CPUID");
2605 return retval;
2606 }
2607
2608 retval = mem_ap_read_u32(armv8->debug_ap,
2609 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2610 retval += mem_ap_read_u32(armv8->debug_ap,
2611 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2612 if (retval != ERROR_OK) {
2613 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2614 return retval;
2615 }
2616 retval = mem_ap_read_u32(armv8->debug_ap,
2617 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2618 retval += mem_ap_read_u32(armv8->debug_ap,
2619 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2620 if (retval != ERROR_OK) {
2621 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2622 return retval;
2623 }
2624
2625 retval = dap_run(armv8->debug_ap->dap);
2626 if (retval != ERROR_OK) {
2627 LOG_ERROR("%s: examination failed\n", target_name(target));
2628 return retval;
2629 }
2630
2631 ttypr |= tmp1;
2632 ttypr = (ttypr << 32) | tmp0;
2633 debug |= tmp3;
2634 debug = (debug << 32) | tmp2;
2635
2636 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2637 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2638 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2639
2640 if (!pc->cti) {
2641 LOG_TARGET_ERROR(target, "CTI not specified");
2642 return ERROR_FAIL;
2643 }
2644
2645 armv8->cti = pc->cti;
2646
2647 retval = aarch64_dpm_setup(aarch64, debug);
2648 if (retval != ERROR_OK)
2649 return retval;
2650
2651 /* Setup Breakpoint Register Pairs */
2652 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2653 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2654 aarch64->brp_num_available = aarch64->brp_num;
2655 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2656 for (i = 0; i < aarch64->brp_num; i++) {
2657 aarch64->brp_list[i].used = 0;
2658 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2659 aarch64->brp_list[i].type = BRP_NORMAL;
2660 else
2661 aarch64->brp_list[i].type = BRP_CONTEXT;
2662 aarch64->brp_list[i].value = 0;
2663 aarch64->brp_list[i].control = 0;
2664 aarch64->brp_list[i].brpn = i;
2665 }
2666
2667 /* Setup Watchpoint Register Pairs */
2668 aarch64->wp_num = (uint32_t)((debug >> 20) & 0x0F) + 1;
2669 aarch64->wp_num_available = aarch64->wp_num;
2670 aarch64->wp_list = calloc(aarch64->wp_num, sizeof(struct aarch64_brp));
2671 for (i = 0; i < aarch64->wp_num; i++) {
2672 aarch64->wp_list[i].used = 0;
2673 aarch64->wp_list[i].type = BRP_NORMAL;
2674 aarch64->wp_list[i].value = 0;
2675 aarch64->wp_list[i].control = 0;
2676 aarch64->wp_list[i].brpn = i;
2677 }
2678
2679 LOG_DEBUG("Configured %i hw breakpoints, %i watchpoints",
2680 aarch64->brp_num, aarch64->wp_num);
2681
2682 target->state = TARGET_UNKNOWN;
2683 target->debug_reason = DBG_REASON_NOTHALTED;
2684 aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2685 target_set_examined(target);
2686 return ERROR_OK;
2687 }
2688
2689 static int aarch64_examine(struct target *target)
2690 {
2691 int retval = ERROR_OK;
2692
2693 /* don't re-probe hardware after each reset */
2694 if (!target_was_examined(target))
2695 retval = aarch64_examine_first(target);
2696
2697 /* Configure core debug access */
2698 if (retval == ERROR_OK)
2699 retval = aarch64_init_debug_access(target);
2700
2701 return retval;
2702 }
2703
/*
 * AArch64 target creation and initialization
 */
2707
2708 static int aarch64_init_target(struct command_context *cmd_ctx,
2709 struct target *target)
2710 {
2711 /* examine_first() does a bunch of this */
2712 arm_semihosting_init(target);
2713 return ERROR_OK;
2714 }
2715
/**
 * Initialize the aarch64-specific parts of the target structure and
 * register the callbacks used by the shared armv8 layer.
 *
 * @param target  the target being created
 * @param aarch64 freshly allocated per-target state (ownership taken)
 * @param dap     the debug access port this core hangs off
 * @return ERROR_OK (no failure paths)
 */
static int aarch64_init_arch_info(struct target *target,
	struct aarch64_common *aarch64, struct adiv5_dap *dap)
{
	struct armv8_common *armv8 = &aarch64->armv8_common;

	/* Setup struct aarch64_common; the magic lets target_to_aarch64()
	 * casts be sanity-checked */
	aarch64->common_magic = AARCH64_COMMON_MAGIC;
	armv8->arm.dap = dap;

	/* register arch-specific functions */
	armv8->examine_debug_reason = NULL;
	armv8->post_debug_entry = aarch64_post_debug_entry;
	armv8->pre_restore_context = NULL;
	armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;

	armv8_init_arch_info(target, armv8);
	/* periodic poll for debug-channel messages from the running core */
	target_register_timer_callback(aarch64_handle_target_request, 1,
		TARGET_TIMER_TYPE_PERIODIC, target);

	return ERROR_OK;
}
2737
2738 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2739 {
2740 struct aarch64_private_config *pc = target->private_config;
2741 struct aarch64_common *aarch64;
2742
2743 if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2744 return ERROR_FAIL;
2745
2746 aarch64 = calloc(1, sizeof(struct aarch64_common));
2747 if (!aarch64) {
2748 LOG_ERROR("Out of memory");
2749 return ERROR_FAIL;
2750 }
2751
2752 return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2753 }
2754
2755 static void aarch64_deinit_target(struct target *target)
2756 {
2757 struct aarch64_common *aarch64 = target_to_aarch64(target);
2758 struct armv8_common *armv8 = &aarch64->armv8_common;
2759 struct arm_dpm *dpm = &armv8->dpm;
2760
2761 armv8_free_reg_cache(target);
2762 free(aarch64->brp_list);
2763 free(dpm->dbp);
2764 free(dpm->dwp);
2765 free(target->private_config);
2766 free(aarch64);
2767 }
2768
2769 static int aarch64_mmu(struct target *target, int *enabled)
2770 {
2771 if (target->state != TARGET_HALTED) {
2772 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2773 return ERROR_TARGET_INVALID;
2774 }
2775
2776 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2777 return ERROR_OK;
2778 }
2779
/**
 * Translate a virtual address to a physical address using the target's
 * translation tables.
 *
 * NOTE(review): the final argument (1) is passed through to
 * armv8_mmu_translate_va_pa(); see its definition for the exact meaning —
 * confirm before changing.
 */
static int aarch64_virt2phys(struct target *target, target_addr_t virt,
	target_addr_t *phys)
{
	return armv8_mmu_translate_va_pa(target, virt, phys, 1);
}
2785
/*
 * private target configuration items
 */
enum aarch64_cfg_param {
	CFG_CTI,	/* "-cti <name>": cross-trigger interface instance */
};

/* option names recognized by aarch64_jim_configure() */
static const struct jim_nvp nvp_config_opts[] = {
	{ .name = "-cti", .value = CFG_CTI },
	{ .name = NULL, .value = -1 }
};
2797
/**
 * Parse the aarch64-specific configure/cget options ("-cti <name>") after
 * delegating the common ADIv5 options ("-dap", "-ap-num", ...) to
 * adiv5_jim_configure().
 *
 * @return JIM_OK on success, JIM_ERR on error, or JIM_CONTINUE when the
 *         topmost option is not handled by this target type
 */
static int aarch64_jim_configure(struct target *target, struct jim_getopt_info *goi)
{
	struct aarch64_private_config *pc;
	struct jim_nvp *n;
	int e;

	/* lazily allocate the private config on the first option seen.
	 * NOTE(review): the calloc() result is used unchecked here — a
	 * failed allocation would fault on pc->adiv5_config below */
	pc = (struct aarch64_private_config *)target->private_config;
	if (!pc) {
		pc = calloc(1, sizeof(struct aarch64_private_config));
		pc->adiv5_config.ap_num = DP_APSEL_INVALID;
		target->private_config = pc;
	}

	/*
	 * Call adiv5_jim_configure() to parse the common DAP options
	 * It will return JIM_CONTINUE if it didn't find any known
	 * options, JIM_OK if it correctly parsed the topmost option
	 * and JIM_ERR if an error occurred during parameter evaluation.
	 * For JIM_CONTINUE, we check our own params.
	 *
	 * adiv5_jim_configure() assumes 'private_config' to point to
	 * 'struct adiv5_private_config'. Override 'private_config'!
	 */
	target->private_config = &pc->adiv5_config;
	e = adiv5_jim_configure(target, goi);
	target->private_config = pc;
	if (e != JIM_CONTINUE)
		return e;

	/* parse config or cget options ... */
	if (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);

		/* check first if topmost item is for us */
		e = jim_nvp_name2value_obj(goi->interp, nvp_config_opts,
				goi->argv[0], &n);
		if (e != JIM_OK)
			return JIM_CONTINUE;

		/* consume the option name */
		e = jim_getopt_obj(goi, NULL);
		if (e != JIM_OK)
			return e;

		switch (n->value) {
		case CFG_CTI: {
			if (goi->isconfigure) {
				/* "configure -cti <name>": resolve and store the CTI */
				Jim_Obj *o_cti;
				struct arm_cti *cti;
				e = jim_getopt_obj(goi, &o_cti);
				if (e != JIM_OK)
					return e;
				cti = cti_instance_by_jim_obj(goi->interp, o_cti);
				if (!cti) {
					Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
					return JIM_ERR;
				}
				pc->cti = cti;
			} else {
				/* "cget -cti": report the configured CTI name */
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}

				if (!pc || !pc->cti) {
					Jim_SetResultString(goi->interp, "CTI not configured", -1);
					return JIM_ERR;
				}
				Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
			}
			break;
		}

		default:
			return JIM_CONTINUE;
		}
	}

	return JIM_OK;
}
2879
2880 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2881 {
2882 struct target *target = get_current_target(CMD_CTX);
2883 struct armv8_common *armv8 = target_to_armv8(target);
2884
2885 return armv8_handle_cache_info_command(CMD,
2886 &armv8->armv8_mmu.armv8_cache);
2887 }
2888
2889 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2890 {
2891 struct target *target = get_current_target(CMD_CTX);
2892 if (!target_was_examined(target)) {
2893 LOG_ERROR("target not examined yet");
2894 return ERROR_FAIL;
2895 }
2896
2897 return aarch64_init_debug_access(target);
2898 }
2899
2900 COMMAND_HANDLER(aarch64_handle_disassemble_command)
2901 {
2902 struct target *target = get_current_target(CMD_CTX);
2903
2904 if (!target) {
2905 LOG_ERROR("No target selected");
2906 return ERROR_FAIL;
2907 }
2908
2909 struct aarch64_common *aarch64 = target_to_aarch64(target);
2910
2911 if (aarch64->common_magic != AARCH64_COMMON_MAGIC) {
2912 command_print(CMD, "current target isn't an AArch64");
2913 return ERROR_FAIL;
2914 }
2915
2916 int count = 1;
2917 target_addr_t address;
2918
2919 switch (CMD_ARGC) {
2920 case 2:
2921 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
2922 /* FALL THROUGH */
2923 case 1:
2924 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
2925 break;
2926 default:
2927 return ERROR_COMMAND_SYNTAX_ERROR;
2928 }
2929
2930 return a64_disassemble(CMD, target, address, count);
2931 }
2932
2933 COMMAND_HANDLER(aarch64_mask_interrupts_command)
2934 {
2935 struct target *target = get_current_target(CMD_CTX);
2936 struct aarch64_common *aarch64 = target_to_aarch64(target);
2937
2938 static const struct jim_nvp nvp_maskisr_modes[] = {
2939 { .name = "off", .value = AARCH64_ISRMASK_OFF },
2940 { .name = "on", .value = AARCH64_ISRMASK_ON },
2941 { .name = NULL, .value = -1 },
2942 };
2943 const struct jim_nvp *n;
2944
2945 if (CMD_ARGC > 0) {
2946 n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2947 if (!n->name) {
2948 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2949 return ERROR_COMMAND_SYNTAX_ERROR;
2950 }
2951
2952 aarch64->isrmasking_mode = n->value;
2953 }
2954
2955 n = jim_nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
2956 command_print(CMD, "aarch64 interrupt mask %s", n->name);
2957
2958 return ERROR_OK;
2959 }
2960
2961 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
2962 {
2963 struct command *c = jim_to_command(interp);
2964 struct command_context *context;
2965 struct target *target;
2966 struct arm *arm;
2967 int retval;
2968 bool is_mcr = false;
2969 int arg_cnt = 0;
2970
2971 if (!strcmp(c->name, "mcr")) {
2972 is_mcr = true;
2973 arg_cnt = 7;
2974 } else {
2975 arg_cnt = 6;
2976 }
2977
2978 context = current_command_context(interp);
2979 assert(context);
2980
2981 target = get_current_target(context);
2982 if (!target) {
2983 LOG_ERROR("%s: no current target", __func__);
2984 return JIM_ERR;
2985 }
2986 if (!target_was_examined(target)) {
2987 LOG_ERROR("%s: not yet examined", target_name(target));
2988 return JIM_ERR;
2989 }
2990
2991 arm = target_to_arm(target);
2992 if (!is_arm(arm)) {
2993 LOG_ERROR("%s: not an ARM", target_name(target));
2994 return JIM_ERR;
2995 }
2996
2997 if (target->state != TARGET_HALTED)
2998 return ERROR_TARGET_NOT_HALTED;
2999
3000 if (arm->core_state == ARM_STATE_AARCH64) {
3001 LOG_ERROR("%s: not 32-bit arm target", target_name(target));
3002 return JIM_ERR;
3003 }
3004
3005 if (argc != arg_cnt) {
3006 LOG_ERROR("%s: wrong number of arguments", __func__);
3007 return JIM_ERR;
3008 }
3009
3010 int cpnum;
3011 uint32_t op1;
3012 uint32_t op2;
3013 uint32_t crn;
3014 uint32_t crm;
3015 uint32_t value;
3016 long l;
3017
3018 /* NOTE: parameter sequence matches ARM instruction set usage:
3019 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
3020 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
3021 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
3022 */
3023 retval = Jim_GetLong(interp, argv[1], &l);
3024 if (retval != JIM_OK)
3025 return retval;
3026 if (l & ~0xf) {
3027 LOG_ERROR("%s: %s %d out of range", __func__,
3028 "coprocessor", (int) l);
3029 return JIM_ERR;
3030 }
3031 cpnum = l;
3032
3033 retval = Jim_GetLong(interp, argv[2], &l);
3034 if (retval != JIM_OK)
3035 return retval;
3036 if (l & ~0x7) {
3037 LOG_ERROR("%s: %s %d out of range", __func__,
3038 "op1", (int) l);
3039 return JIM_ERR;
3040 }
3041 op1 = l;
3042
3043 retval = Jim_GetLong(interp, argv[3], &l);
3044 if (retval != JIM_OK)
3045 return retval;
3046 if (l & ~0xf) {
3047 LOG_ERROR("%s: %s %d out of range", __func__,
3048 "CRn", (int) l);
3049 return JIM_ERR;
3050 }
3051 crn = l;
3052
3053 retval = Jim_GetLong(interp, argv[4], &l);
3054 if (retval != JIM_OK)
3055 return retval;
3056 if (l & ~0xf) {
3057 LOG_ERROR("%s: %s %d out of range", __func__,
3058 "CRm", (int) l);
3059 return JIM_ERR;
3060 }
3061 crm = l;
3062
3063 retval = Jim_GetLong(interp, argv[5], &l);
3064 if (retval != JIM_OK)
3065 return retval;
3066 if (l & ~0x7) {
3067 LOG_ERROR("%s: %s %d out of range", __func__,
3068 "op2", (int) l);
3069 return JIM_ERR;
3070 }
3071 op2 = l;
3072
3073 value = 0;
3074
3075 if (is_mcr == true) {
3076 retval = Jim_GetLong(interp, argv[6], &l);
3077 if (retval != JIM_OK)
3078 return retval;
3079 value = l;
3080
3081 /* NOTE: parameters reordered! */
3082 /* ARMV4_5_MCR(cpnum, op1, 0, crn, crm, op2) */
3083 retval = arm->mcr(target, cpnum, op1, op2, crn, crm, value);
3084 if (retval != ERROR_OK)
3085 return JIM_ERR;
3086 } else {
3087 /* NOTE: parameters reordered! */
3088 /* ARMV4_5_MRC(cpnum, op1, 0, crn, crm, op2) */
3089 retval = arm->mrc(target, cpnum, op1, op2, crn, crm, &value);
3090 if (retval != ERROR_OK)
3091 return JIM_ERR;
3092
3093 Jim_SetResult(interp, Jim_NewIntObj(interp, value));
3094 }
3095
3096 return JIM_OK;
3097 }
3098
/* subcommands of the "aarch64" command group */
static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "disassemble",
		.handler = aarch64_handle_disassemble_command,
		.mode = COMMAND_EXEC,
		.help = "Disassemble instructions",
		.usage = "address [count]",
	},
	{
		.name = "maskisr",
		.handler = aarch64_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask aarch64 interrupts during single-step",
		.usage = "['on'|'off']",
	},
	{
		.name = "mcr",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "write coprocessor register",
		.usage = "cpnum op1 CRn CRm op2 value",
	},
	{
		.name = "mrc",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "read coprocessor register",
		.usage = "cpnum op1 CRn CRm op2",
	},
	{
		/* shared "smp" command group from smp.h */
		.chain = smp_command_handlers,
	},

	COMMAND_REGISTRATION_DONE
};
3149
/* implemented in arm_semihosting.c */
extern const struct command_registration semihosting_common_handlers[];

/* top-level command groups registered for aarch64 targets */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.name = "arm",
		.mode = COMMAND_ANY,
		.help = "ARM Command Group",
		.usage = "",
		.chain = semihosting_common_handlers
	},
	{
		.chain = armv8_command_handlers,
	},
	{
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "Aarch64 command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3172
3173 struct target_type aarch64_target = {
3174 .name = "aarch64",
3175
3176 .poll = aarch64_poll,
3177 .arch_state = armv8_arch_state,
3178
3179 .halt = aarch64_halt,
3180 .resume = aarch64_resume,
3181 .step = aarch64_step,
3182
3183 .assert_reset = aarch64_assert_reset,
3184 .deassert_reset = aarch64_deassert_reset,
3185
3186 /* REVISIT allow exporting VFP3 registers ... */
3187 .get_gdb_arch = armv8_get_gdb_arch,
3188 .get_gdb_reg_list = armv8_get_gdb_reg_list,
3189
3190 .read_memory = aarch64_read_memory,
3191 .write_memory = aarch64_write_memory,
3192
3193 .add_breakpoint = aarch64_add_breakpoint,
3194 .add_context_breakpoint = aarch64_add_context_breakpoint,
3195 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
3196 .remove_breakpoint = aarch64_remove_breakpoint,
3197 .add_watchpoint = aarch64_add_watchpoint,
3198 .remove_watchpoint = aarch64_remove_watchpoint,
3199 .hit_watchpoint = aarch64_hit_watchpoint,
3200
3201 .commands = aarch64_command_handlers,
3202 .target_create = aarch64_target_create,
3203 .target_jim_configure = aarch64_jim_configure,
3204 .init_target = aarch64_init_target,
3205 .deinit_target = aarch64_deinit_target,
3206 .examine = aarch64_examine,
3207
3208 .read_phys_memory = aarch64_read_phys_memory,
3209 .write_phys_memory = aarch64_write_phys_memory,
3210 .mmu = aarch64_mmu,
3211 .virt2phys = aarch64_virt2phys,
3212 };

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)