bcfce659244af6ca099623c1a8dc277e3f1d7877
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
33 enum restart_mode {
34 RESTART_LAZY,
35 RESTART_SYNC,
36 };
37
38 enum halt_mode {
39 HALT_LAZY,
40 HALT_SYNC,
41 };
42
43 static int aarch64_poll(struct target *target);
44 static int aarch64_debug_entry(struct target *target);
45 static int aarch64_restore_context(struct target *target, bool bpwp);
46 static int aarch64_set_breakpoint(struct target *target,
47 struct breakpoint *breakpoint, uint8_t matchmode);
48 static int aarch64_set_context_breakpoint(struct target *target,
49 struct breakpoint *breakpoint, uint8_t matchmode);
50 static int aarch64_set_hybrid_breakpoint(struct target *target,
51 struct breakpoint *breakpoint);
52 static int aarch64_unset_breakpoint(struct target *target,
53 struct breakpoint *breakpoint);
54 static int aarch64_mmu(struct target *target, int *enabled);
55 static int aarch64_virt2phys(struct target *target,
56 target_addr_t virt, target_addr_t *phys);
57 static int aarch64_read_cpu_memory(struct target *target,
58 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
59
60 #define foreach_smp_target(pos, head) \
61 for (pos = head; (pos != NULL); pos = pos->next)
62
/*
 * Write the cached SCTLR value back to the core if it was changed while
 * the target was halted (e.g. by aarch64_mmu_modify()).  The write goes
 * to the SCTLR of the exception level the core is currently in, or via
 * cp15 when halted in an AArch32 mode.  Returns ERROR_OK, or ERROR_FAIL
 * for an unsupported core mode, or the DPM write error.
 */
static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			/* EL0 cannot write SCTLR_EL1; temporarily switch to EL1h */
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
			/* AArch32 modes: write SCTLR through cp15 c1,c0 */
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			LOG_INFO("cannot read system control register in this mode");
			return ERROR_FAIL;
		}

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		/* the value is transferred to the core through R0/X0 by the DPM */
		retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		/* switch back to the mode the core halted in */
		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}
118
119 /* modify system_control_reg in order to enable or disable mmu for :
120 * - virt2phys address conversion
121 * - read or write memory in phys or virt address */
122 static int aarch64_mmu_modify(struct target *target, int enable)
123 {
124 struct aarch64_common *aarch64 = target_to_aarch64(target);
125 struct armv8_common *armv8 = &aarch64->armv8_common;
126 int retval = ERROR_OK;
127 uint32_t instr = 0;
128
129 if (enable) {
130 /* if mmu enabled at target stop and mmu not enable */
131 if (!(aarch64->system_control_reg & 0x1U)) {
132 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
133 return ERROR_FAIL;
134 }
135 if (!(aarch64->system_control_reg_curr & 0x1U))
136 aarch64->system_control_reg_curr |= 0x1U;
137 } else {
138 if (aarch64->system_control_reg_curr & 0x4U) {
139 /* data cache is active */
140 aarch64->system_control_reg_curr &= ~0x4U;
141 /* flush data cache armv8 function to be called */
142 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
143 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
144 }
145 if ((aarch64->system_control_reg_curr & 0x1U)) {
146 aarch64->system_control_reg_curr &= ~0x1U;
147 }
148 }
149
150 switch (armv8->arm.core_mode) {
151 case ARMV8_64_EL0T:
152 case ARMV8_64_EL1T:
153 case ARMV8_64_EL1H:
154 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
155 break;
156 case ARMV8_64_EL2T:
157 case ARMV8_64_EL2H:
158 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
159 break;
160 case ARMV8_64_EL3H:
161 case ARMV8_64_EL3T:
162 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
163 break;
164
165 case ARM_MODE_SVC:
166 case ARM_MODE_ABT:
167 case ARM_MODE_FIQ:
168 case ARM_MODE_IRQ:
169 instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
170 break;
171
172 default:
173 LOG_DEBUG("unknown cpu state 0x%" PRIx32, armv8->arm.core_mode);
174 break;
175 }
176
177 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
178 aarch64->system_control_reg_curr);
179 return retval;
180 }
181
/*
 * Basic debug access, very low level assumes state is saved
 *
 * Clears the OS Lock, clears the sticky power-down status (by reading
 * PRSR), and programs the static CTI routing used by halt/resume:
 * channel 0 drives the halt trigger, channel 1 the restart trigger,
 * with all channels gated off from the CTM by default.
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG("%s", target_name(target));

	/* unlock the OS Lock so debug registers can be accessed */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = arm_cti_enable(armv8->cti, true);
	/* By default, gate all channel events to and from the CTM */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* output halt requests to PE on channel 0 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	return ERROR_OK;
}
232
233 /* Write to memory mapped registers directly with no cache or mmu handling */
234 static int aarch64_dap_write_memap_register_u32(struct target *target,
235 uint32_t address,
236 uint32_t value)
237 {
238 int retval;
239 struct armv8_common *armv8 = target_to_armv8(target);
240
241 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
242
243 return retval;
244 }
245
246 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
247 {
248 struct arm_dpm *dpm = &a8->armv8_common.dpm;
249 int retval;
250
251 dpm->arm = &a8->armv8_common.arm;
252 dpm->didr = debug;
253
254 retval = armv8_dpm_setup(dpm);
255 if (retval == ERROR_OK)
256 retval = armv8_dpm_initialize(dpm);
257
258 return retval;
259 }
260
261 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
262 {
263 struct armv8_common *armv8 = target_to_armv8(target);
264 return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
265 }
266
267 static int aarch64_check_state_one(struct target *target,
268 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
269 {
270 struct armv8_common *armv8 = target_to_armv8(target);
271 uint32_t prsr;
272 int retval;
273
274 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
275 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
276 if (retval != ERROR_OK)
277 return retval;
278
279 if (p_prsr)
280 *p_prsr = prsr;
281
282 if (p_result)
283 *p_result = (prsr & mask) == (val & mask);
284
285 return ERROR_OK;
286 }
287
288 static int aarch64_wait_halt_one(struct target *target)
289 {
290 int retval = ERROR_OK;
291 uint32_t prsr;
292
293 int64_t then = timeval_ms();
294 for (;;) {
295 int halted;
296
297 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
298 if (retval != ERROR_OK || halted)
299 break;
300
301 if (timeval_ms() > then + 1000) {
302 retval = ERROR_TARGET_TIMEOUT;
303 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
304 break;
305 }
306 }
307 return retval;
308 }
309
/*
 * Prepare all running PEs of an SMP group for halting: enable halting
 * debug mode (DSCR.HDE) and open the CTI gate for channel 0 so a single
 * halt event can reach every core through the trigger matrix.
 *
 * When exc_target is true the calling target itself is skipped.
 * *p_first (if non-NULL) receives the first other prepared target, or
 * the calling target itself when none was prepared / exc_target is false.
 */
static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head = target->head;
	struct target *first = NULL;

	LOG_DEBUG("target %s exc %i", target_name(target), exc_target);

	while (head != NULL) {
		struct target *curr = head->target;
		struct armv8_common *armv8 = target_to_armv8(curr);
		head = head->next;

		/* optionally skip the target that initiated the halt */
		if (exc_target && curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_RUNNING)
			continue;

		/* HACK: mark this target as prepared for halting */
		curr->debug_reason = DBG_REASON_DBGRQ;

		/* open the gate for channel 0 to let HALT requests pass to the CTM */
		retval = arm_cti_ungate_channel(armv8->cti, 0);
		if (retval == ERROR_OK)
			retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
		if (retval != ERROR_OK)
			break;

		LOG_DEBUG("target %s prepared", target_name(curr));

		if (first == NULL)
			first = curr;
	}

	if (p_first) {
		if (exc_target && first)
			*p_first = first;
		else
			*p_first = target;
	}

	return retval;
}
355
/*
 * Halt a single PE by pulsing CTI channel 0.  With HALT_SYNC the call
 * additionally waits (up to ~1s) until PRSR reports the core halted;
 * with HALT_LAZY it returns right after the halt request was issued.
 */
static int aarch64_halt_one(struct target *target, enum halt_mode mode)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG("%s", target_name(target));

	/* allow Halting Debug Mode */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = arm_cti_pulse_channel(armv8->cti, 0);
	if (retval != ERROR_OK)
		return retval;

	if (mode == HALT_SYNC) {
		retval = aarch64_wait_halt_one(target);
		if (retval != ERROR_OK) {
			if (retval == ERROR_TARGET_TIMEOUT)
				LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
			return retval;
		}
	}

	return ERROR_OK;
}
384
/*
 * Halt every member of an SMP group.  The group is prepared via
 * aarch64_prepare_halt_smp(), one PE is halted explicitly and the halt
 * is expected to propagate to the others through the CTI trigger
 * matrix.  When exc_target is true the calling target is excluded.
 * Waits up to ~1s for all examined members to report PRSR.HALT.
 */
static int aarch64_halt_smp(struct target *target, bool exc_target)
{
	struct target *next = target;
	int retval;

	/* prepare halt on all PEs of the group */
	retval = aarch64_prepare_halt_smp(target, exc_target, &next);

	/* no other member to halt besides the excluded caller: done */
	if (exc_target && next == target)
		return retval;

	/* halt the target PE */
	if (retval == ERROR_OK)
		retval = aarch64_halt_one(next, HALT_LAZY);

	if (retval != ERROR_OK)
		return retval;

	/* wait for all PEs to halt */
	int64_t then = timeval_ms();
	for (;;) {
		bool all_halted = true;
		struct target_list *head;
		struct target *curr;

		foreach_smp_target(head, target->head) {
			int halted;

			curr = head->target;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
			if (retval != ERROR_OK || !halted) {
				/* curr is left pointing at the member that has
				 * not halted yet; used by the hack below */
				all_halted = false;
				break;
			}
		}

		if (all_halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}

		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit halt for the second cluster.
		 */
		retval = aarch64_halt_one(curr, HALT_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
447
/*
 * After one member of an SMP group halted, bring the rest of the group
 * up to date: if the halt was not debugger-requested, halt the
 * remaining members, then poll every other member so its halt gets
 * processed.  The target serving GDB is polled last.  curr->smp is
 * temporarily cleared around aarch64_poll() to avoid recursing back
 * into this function.
 */
static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;

	if (debug_reason == DBG_REASON_NOTHALTED) {
		LOG_INFO("Halting remaining targets in SMP group");
		aarch64_halt_smp(target, true);
	}

	/* poll all targets in the group, but skip the target that serves GDB */
	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* remember the gdb_service->target */
		if (curr->gdb_service != NULL)
			gdb_target = curr->gdb_service->target;
		/* skip it */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in aarch64_poll() */
		curr->smp = 0;
		aarch64_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target != NULL && gdb_target != target)
		aarch64_poll(gdb_target);

	return ERROR_OK;
}
489
490 /*
491 * Aarch64 Run control
492 */
493
/*
 * Poll PRSR and update the OpenOCD target state accordingly.  On a
 * freshly detected halt this performs debug entry, updates the other
 * SMP group members and fires the matching HALTED / DEBUG_HALTED event
 * callbacks; otherwise the state is simply set to running.
 */
static int aarch64_poll(struct target *target)
{
	enum target_state prev_target_state;
	int retval = ERROR_OK;
	int halted;

	retval = aarch64_check_state_one(target,
				PRSR_HALT, PRSR_HALT, &halted, NULL);
	if (retval != ERROR_OK)
		return retval;

	if (halted) {
		prev_target_state = target->state;
		if (prev_target_state != TARGET_HALTED) {
			/* capture the reason before debug entry overwrites it */
			enum target_debug_reason debug_reason = target->debug_reason;

			/* We have a halting debug event */
			target->state = TARGET_HALTED;
			LOG_DEBUG("Target %s halted", target_name(target));
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp)
				update_halt_gdb(target, debug_reason);

			switch (prev_target_state) {
			case TARGET_RUNNING:
			case TARGET_UNKNOWN:
			case TARGET_RESET:
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
				break;
			case TARGET_DEBUG_RUNNING:
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
				break;
			default:
				break;
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
538
539 static int aarch64_halt(struct target *target)
540 {
541 if (target->smp)
542 return aarch64_halt_smp(target, false);
543
544 return aarch64_halt_one(target, HALT_SYNC);
545 }
546
/*
 * Restore one target's context in preparation for resume.
 * current == 1: resume at the current PC (which is written back to
 * *address); otherwise continue at *address.  The PC is aligned or
 * fixed up according to the core state before being written back, then
 * the system control register and the full register context are
 * restored to the core.
 */
static int aarch64_restore_one(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	LOG_DEBUG("%s", target_name(target));

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	/* mark PC dirty so the context restore below writes it to the core */
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
601
602 /**
603 * prepare single target for restart
604 *
605 *
606 */
607 static int aarch64_prepare_restart_one(struct target *target)
608 {
609 struct armv8_common *armv8 = target_to_armv8(target);
610 int retval;
611 uint32_t dscr;
612 uint32_t tmp;
613
614 LOG_DEBUG("%s", target_name(target));
615
616 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
617 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
618 if (retval != ERROR_OK)
619 return retval;
620
621 if ((dscr & DSCR_ITE) == 0)
622 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
623 if ((dscr & DSCR_ERR) != 0)
624 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
625
626 /* acknowledge a pending CTI halt event */
627 retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
628 /*
629 * open the CTI gate for channel 1 so that the restart events
630 * get passed along to all PEs. Also close gate for channel 0
631 * to isolate the PE from halt events.
632 */
633 if (retval == ERROR_OK)
634 retval = arm_cti_ungate_channel(armv8->cti, 1);
635 if (retval == ERROR_OK)
636 retval = arm_cti_gate_channel(armv8->cti, 0);
637
638 /* make sure that DSCR.HDE is set */
639 if (retval == ERROR_OK) {
640 dscr |= DSCR_HDE;
641 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
642 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
643 }
644
645 /* clear sticky bits in PRSR, SDR is now 0 */
646 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
647 armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
648
649 return retval;
650 }
651
652 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
653 {
654 struct armv8_common *armv8 = target_to_armv8(target);
655 int retval;
656
657 LOG_DEBUG("%s", target_name(target));
658
659 /* trigger an event on channel 1, generates a restart request to the PE */
660 retval = arm_cti_pulse_channel(armv8->cti, 1);
661 if (retval != ERROR_OK)
662 return retval;
663
664 if (mode == RESTART_SYNC) {
665 int64_t then = timeval_ms();
666 for (;;) {
667 int resumed;
668 /*
669 * if PRSR.SDR is set now, the target did restart, even
670 * if it's now already halted again (e.g. due to breakpoint)
671 */
672 retval = aarch64_check_state_one(target,
673 PRSR_SDR, PRSR_SDR, &resumed, NULL);
674 if (retval != ERROR_OK || resumed)
675 break;
676
677 if (timeval_ms() > then + 1000) {
678 LOG_ERROR("%s: Timeout waiting for resume"PRIx32, target_name(target));
679 retval = ERROR_TARGET_TIMEOUT;
680 break;
681 }
682 }
683 }
684
685 if (retval != ERROR_OK)
686 return retval;
687
688 target->debug_reason = DBG_REASON_NOTHALTED;
689 target->state = TARGET_RUNNING;
690
691 return ERROR_OK;
692 }
693
694 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
695 {
696 int retval;
697
698 LOG_DEBUG("%s", target_name(target));
699
700 retval = aarch64_prepare_restart_one(target);
701 if (retval == ERROR_OK)
702 retval = aarch64_do_restart_one(target, mode);
703
704 return retval;
705 }
706
/*
 * prepare all but the current target for restart
 *
 * Restores the register context of every other halted group member and
 * primes its CTI for the restart event.  *p_first (if non-NULL)
 * receives the first member that was prepared, or NULL when none was.
 */
static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;
	uint64_t address;

	foreach_smp_target(head, target->head) {
		struct target *curr = head->target;

		/* skip calling target */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_HALTED)
			continue;

		/* resume at current address, not in step mode */
		retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
		if (retval == ERROR_OK)
			retval = aarch64_prepare_restart_one(curr);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to restore target %s", target_name(curr));
			break;
		}
		/* remember the first valid target in the group */
		if (first == NULL)
			first = curr;
	}

	if (p_first)
		*p_first = first;

	return retval;
}
746
747
/*
 * Restart all SMP group members except the calling target itself.
 * Used while single-stepping one PE so the rest of the group keeps
 * running.  The first prepared member is restarted lazily (the event
 * propagates via the CTM), then the call waits (up to ~1s) for every
 * other member to leave debug state.
 */
static int aarch64_step_restart_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("%s", target_name(target));

	/* restore context and prime the CTI of all other halted members */
	retval = aarch64_prep_restart_smp(target, 0, &first);
	if (retval != ERROR_OK)
		return retval;

	if (first != NULL)
		retval = aarch64_do_restart_one(first, RESTART_LAZY);
	if (retval != ERROR_OK) {
		LOG_DEBUG("error restarting target %s", target_name(first));
		return retval;
	}

	int64_t then = timeval_ms();
	for (;;) {
		struct target *curr = target;
		bool all_resumed = true;

		foreach_smp_target(head, target->head) {
			uint32_t prsr;
			int resumed;

			curr = head->target;

			if (curr == target)
				continue;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr,
						PRSR_SDR, PRSR_SDR, &resumed, &prsr);
			/* SDR clear while halted: this member did not restart yet */
			if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
				all_resumed = false;
				break;
			}

			if (curr->state != TARGET_RUNNING) {
				curr->state = TARGET_RUNNING;
				curr->debug_reason = DBG_REASON_NOTHALTED;
				target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
			}
		}

		if (all_resumed)
			break;

		if (timeval_ms() > then + 1000) {
			LOG_ERROR("%s: timeout waiting for target resume", __func__);
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit resume for the second cluster.
		 */
		retval = aarch64_do_restart_one(curr, RESTART_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
820
/*
 * Resume execution.  current == 1 resumes at the current PC, otherwise
 * at <address>.  For an SMP group all other members are prepared first
 * and restarted through the CTI trigger matrix; the call then waits
 * (up to ~1s) for every member to actually leave debug state.  With
 * debug_execution set the target enters TARGET_DEBUG_RUNNING and fires
 * DEBUG_RESUMED callbacks instead of RESUMED.
 */
static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	/*
	 * If this target is part of a SMP group, prepare the others
	 * targets for resuming. This involves restoring the complete
	 * target register context and setting up CTI gates to accept
	 * resume events from the trigger matrix.
	 */
	if (target->smp) {
		retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
		if (retval != ERROR_OK)
			return retval;
	}

	/* all targets prepared, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
				 debug_execution);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_SYNC);
	if (retval != ERROR_OK)
		return retval;

	if (target->smp) {
		int64_t then = timeval_ms();
		for (;;) {
			struct target *curr = target;
			struct target_list *head;
			bool all_resumed = true;

			foreach_smp_target(head, target->head) {
				uint32_t prsr;
				int resumed;

				curr = head->target;
				if (curr == target)
					continue;
				if (!target_was_examined(curr))
					continue;

				retval = aarch64_check_state_one(curr,
							PRSR_SDR, PRSR_SDR, &resumed, &prsr);
				/* SDR clear while halted: member did not restart yet */
				if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
					all_resumed = false;
					break;
				}

				if (curr->state != TARGET_RUNNING) {
					curr->state = TARGET_RUNNING;
					curr->debug_reason = DBG_REASON_NOTHALTED;
					target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
				}
			}

			if (all_resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}

			/*
			 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
			 * and it looks like the CTI's are not connected by a common
			 * trigger matrix. It seems that we need to halt one core in each
			 * cluster explicitly. So if we find that a core has not halted
			 * yet, we trigger an explicit resume for the second cluster.
			 */
			retval = aarch64_do_restart_one(curr, RESTART_LAZY);
			if (retval != ERROR_OK)
				break;
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}
920
/*
 * Process entry into debug state: clear sticky errors, acknowledge the
 * CTI halt event, select AArch32/AArch64 opcode and register access
 * according to the halted core state, derive the debug reason from
 * DSCR and read back the current register set.  For watchpoint hits
 * the 64-bit fault address is assembled from WFAR1:WFAR0 and reported.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	uint32_t dscr;

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval == ERROR_OK)
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);

	/* switch opcode set and register access model to the halted state */
	dpm->dscr = dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* close the CTI gate for all events */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		/* read the high word of the 64-bit watchpoint fault address */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR1,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
988
/*
 * Arch-specific part of debug entry: read the system control register
 * (SCTLR) of the exception level the core halted in, cache it, identify
 * the cache geometry on first entry, and derive the MMU/cache enable
 * flags from the SCTLR M, C and I bits.
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* EL0 cannot read SCTLR_EL1; temporarily switch to EL1h */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
		/* AArch32 modes: read SCTLR through cp15 c1,c0 */
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_INFO("cannot read system control register in this mode");
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	/* switch back to the mode the core halted in */
	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* identify caches and read MPIDR only once (info == -1 means unknown) */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	/* SCTLR.M (bit 0), SCTLR.C (bit 2), SCTLR.I (bit 12) */
	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
			(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
			(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	return ERROR_OK;
}
1052
1053 /*
1054 * single-step a target
1055 */
1056 static int aarch64_step(struct target *target, int current, target_addr_t address,
1057 int handle_breakpoints)
1058 {
1059 struct armv8_common *armv8 = target_to_armv8(target);
1060 struct aarch64_common *aarch64 = target_to_aarch64(target);
1061 int saved_retval = ERROR_OK;
1062 int retval;
1063 uint32_t edecr;
1064
1065 if (target->state != TARGET_HALTED) {
1066 LOG_WARNING("target not halted");
1067 return ERROR_TARGET_NOT_HALTED;
1068 }
1069
1070 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1071 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1072 /* make sure EDECR.SS is not set when restoring the register */
1073
1074 if (retval == ERROR_OK) {
1075 edecr &= ~0x4;
1076 /* set EDECR.SS to enter hardware step mode */
1077 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1078 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1079 }
1080 /* disable interrupts while stepping */
1081 if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
1082 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1083 /* bail out if stepping setup has failed */
1084 if (retval != ERROR_OK)
1085 return retval;
1086
1087 if (target->smp && !handle_breakpoints) {
1088 /*
1089 * isolate current target so that it doesn't get resumed
1090 * together with the others
1091 */
1092 retval = arm_cti_gate_channel(armv8->cti, 1);
1093 /* resume all other targets in the group */
1094 if (retval == ERROR_OK)
1095 retval = aarch64_step_restart_smp(target);
1096 if (retval != ERROR_OK) {
1097 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1098 return retval;
1099 }
1100 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1101 }
1102
1103 /* all other targets running, restore and restart the current target */
1104 retval = aarch64_restore_one(target, current, &address, 0, 0);
1105 if (retval == ERROR_OK)
1106 retval = aarch64_restart_one(target, RESTART_LAZY);
1107
1108 if (retval != ERROR_OK)
1109 return retval;
1110
1111 LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
1112 if (!handle_breakpoints)
1113 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1114
1115 int64_t then = timeval_ms();
1116 for (;;) {
1117 int stepped;
1118 uint32_t prsr;
1119
1120 retval = aarch64_check_state_one(target,
1121 PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
1122 if (retval != ERROR_OK || stepped)
1123 break;
1124
1125 if (timeval_ms() > then + 100) {
1126 LOG_ERROR("timeout waiting for target %s halt after step",
1127 target_name(target));
1128 retval = ERROR_TARGET_TIMEOUT;
1129 break;
1130 }
1131 }
1132
1133 /*
1134 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1135 * causes a timeout. The core takes the step but doesn't complete it and so
1136 * debug state is never entered. However, you can manually halt the core
1137 * as an external debug even is also a WFI wakeup event.
1138 */
1139 if (retval == ERROR_TARGET_TIMEOUT)
1140 saved_retval = aarch64_halt_one(target, HALT_SYNC);
1141
1142 /* restore EDECR */
1143 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1144 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1145 if (retval != ERROR_OK)
1146 return retval;
1147
1148 /* restore interrupts */
1149 if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
1150 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1151 if (retval != ERROR_OK)
1152 return ERROR_OK;
1153 }
1154
1155 if (saved_retval != ERROR_OK)
1156 return saved_retval;
1157
1158 return aarch64_poll(target);
1159 }
1160
1161 static int aarch64_restore_context(struct target *target, bool bpwp)
1162 {
1163 struct armv8_common *armv8 = target_to_armv8(target);
1164 struct arm *arm = &armv8->arm;
1165
1166 int retval;
1167
1168 LOG_DEBUG("%s", target_name(target));
1169
1170 if (armv8->pre_restore_context)
1171 armv8->pre_restore_context(target);
1172
1173 retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1174 if (retval == ERROR_OK) {
1175 /* registers are now invalid */
1176 register_cache_invalidate(arm->core_cache);
1177 register_cache_invalidate(arm->core_cache->next);
1178 }
1179
1180 return retval;
1181 }
1182
/*
 * AArch64 breakpoint and watchpoint functions
 */
1186
1187 /* Setup hardware Breakpoint Register Pair */
1188 static int aarch64_set_breakpoint(struct target *target,
1189 struct breakpoint *breakpoint, uint8_t matchmode)
1190 {
1191 int retval;
1192 int brp_i = 0;
1193 uint32_t control;
1194 uint8_t byte_addr_select = 0x0F;
1195 struct aarch64_common *aarch64 = target_to_aarch64(target);
1196 struct armv8_common *armv8 = &aarch64->armv8_common;
1197 struct aarch64_brp *brp_list = aarch64->brp_list;
1198
1199 if (breakpoint->set) {
1200 LOG_WARNING("breakpoint already set");
1201 return ERROR_OK;
1202 }
1203
1204 if (breakpoint->type == BKPT_HARD) {
1205 int64_t bpt_value;
1206 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
1207 brp_i++;
1208 if (brp_i >= aarch64->brp_num) {
1209 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1210 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1211 }
1212 breakpoint->set = brp_i + 1;
1213 if (breakpoint->length == 2)
1214 byte_addr_select = (3 << (breakpoint->address & 0x02));
1215 control = ((matchmode & 0x7) << 20)
1216 | (1 << 13)
1217 | (byte_addr_select << 5)
1218 | (3 << 1) | 1;
1219 brp_list[brp_i].used = 1;
1220 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1221 brp_list[brp_i].control = control;
1222 bpt_value = brp_list[brp_i].value;
1223
1224 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1225 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1226 (uint32_t)(bpt_value & 0xFFFFFFFF));
1227 if (retval != ERROR_OK)
1228 return retval;
1229 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1230 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1231 (uint32_t)(bpt_value >> 32));
1232 if (retval != ERROR_OK)
1233 return retval;
1234
1235 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1236 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1237 brp_list[brp_i].control);
1238 if (retval != ERROR_OK)
1239 return retval;
1240 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1241 brp_list[brp_i].control,
1242 brp_list[brp_i].value);
1243
1244 } else if (breakpoint->type == BKPT_SOFT) {
1245 uint8_t code[4];
1246
1247 buf_set_u32(code, 0, 32, armv8_opcode(armv8, ARMV8_OPC_HLT));
1248 retval = target_read_memory(target,
1249 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1250 breakpoint->length, 1,
1251 breakpoint->orig_instr);
1252 if (retval != ERROR_OK)
1253 return retval;
1254
1255 armv8_cache_d_inner_flush_virt(armv8,
1256 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1257 breakpoint->length);
1258
1259 retval = target_write_memory(target,
1260 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1261 breakpoint->length, 1, code);
1262 if (retval != ERROR_OK)
1263 return retval;
1264
1265 armv8_cache_d_inner_flush_virt(armv8,
1266 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1267 breakpoint->length);
1268
1269 armv8_cache_i_inner_inval_virt(armv8,
1270 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1271 breakpoint->length);
1272
1273 breakpoint->set = 0x11; /* Any nice value but 0 */
1274 }
1275
1276 /* Ensure that halting debug mode is enable */
1277 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1278 if (retval != ERROR_OK) {
1279 LOG_DEBUG("Failed to set DSCR.HDE");
1280 return retval;
1281 }
1282
1283 return ERROR_OK;
1284 }
1285
1286 static int aarch64_set_context_breakpoint(struct target *target,
1287 struct breakpoint *breakpoint, uint8_t matchmode)
1288 {
1289 int retval = ERROR_FAIL;
1290 int brp_i = 0;
1291 uint32_t control;
1292 uint8_t byte_addr_select = 0x0F;
1293 struct aarch64_common *aarch64 = target_to_aarch64(target);
1294 struct armv8_common *armv8 = &aarch64->armv8_common;
1295 struct aarch64_brp *brp_list = aarch64->brp_list;
1296
1297 if (breakpoint->set) {
1298 LOG_WARNING("breakpoint already set");
1299 return retval;
1300 }
1301 /*check available context BRPs*/
1302 while ((brp_list[brp_i].used ||
1303 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
1304 brp_i++;
1305
1306 if (brp_i >= aarch64->brp_num) {
1307 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1308 return ERROR_FAIL;
1309 }
1310
1311 breakpoint->set = brp_i + 1;
1312 control = ((matchmode & 0x7) << 20)
1313 | (1 << 13)
1314 | (byte_addr_select << 5)
1315 | (3 << 1) | 1;
1316 brp_list[brp_i].used = 1;
1317 brp_list[brp_i].value = (breakpoint->asid);
1318 brp_list[brp_i].control = control;
1319 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1320 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1321 brp_list[brp_i].value);
1322 if (retval != ERROR_OK)
1323 return retval;
1324 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1325 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1326 brp_list[brp_i].control);
1327 if (retval != ERROR_OK)
1328 return retval;
1329 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1330 brp_list[brp_i].control,
1331 brp_list[brp_i].value);
1332 return ERROR_OK;
1333
1334 }
1335
1336 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1337 {
1338 int retval = ERROR_FAIL;
1339 int brp_1 = 0; /* holds the contextID pair */
1340 int brp_2 = 0; /* holds the IVA pair */
1341 uint32_t control_CTX, control_IVA;
1342 uint8_t CTX_byte_addr_select = 0x0F;
1343 uint8_t IVA_byte_addr_select = 0x0F;
1344 uint8_t CTX_machmode = 0x03;
1345 uint8_t IVA_machmode = 0x01;
1346 struct aarch64_common *aarch64 = target_to_aarch64(target);
1347 struct armv8_common *armv8 = &aarch64->armv8_common;
1348 struct aarch64_brp *brp_list = aarch64->brp_list;
1349
1350 if (breakpoint->set) {
1351 LOG_WARNING("breakpoint already set");
1352 return retval;
1353 }
1354 /*check available context BRPs*/
1355 while ((brp_list[brp_1].used ||
1356 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1357 brp_1++;
1358
1359 printf("brp(CTX) found num: %d\n", brp_1);
1360 if (brp_1 >= aarch64->brp_num) {
1361 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1362 return ERROR_FAIL;
1363 }
1364
1365 while ((brp_list[brp_2].used ||
1366 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1367 brp_2++;
1368
1369 printf("brp(IVA) found num: %d\n", brp_2);
1370 if (brp_2 >= aarch64->brp_num) {
1371 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1372 return ERROR_FAIL;
1373 }
1374
1375 breakpoint->set = brp_1 + 1;
1376 breakpoint->linked_BRP = brp_2;
1377 control_CTX = ((CTX_machmode & 0x7) << 20)
1378 | (brp_2 << 16)
1379 | (0 << 14)
1380 | (CTX_byte_addr_select << 5)
1381 | (3 << 1) | 1;
1382 brp_list[brp_1].used = 1;
1383 brp_list[brp_1].value = (breakpoint->asid);
1384 brp_list[brp_1].control = control_CTX;
1385 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1386 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1387 brp_list[brp_1].value);
1388 if (retval != ERROR_OK)
1389 return retval;
1390 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1391 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1392 brp_list[brp_1].control);
1393 if (retval != ERROR_OK)
1394 return retval;
1395
1396 control_IVA = ((IVA_machmode & 0x7) << 20)
1397 | (brp_1 << 16)
1398 | (1 << 13)
1399 | (IVA_byte_addr_select << 5)
1400 | (3 << 1) | 1;
1401 brp_list[brp_2].used = 1;
1402 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1403 brp_list[brp_2].control = control_IVA;
1404 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1405 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1406 brp_list[brp_2].value & 0xFFFFFFFF);
1407 if (retval != ERROR_OK)
1408 return retval;
1409 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1410 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1411 brp_list[brp_2].value >> 32);
1412 if (retval != ERROR_OK)
1413 return retval;
1414 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1415 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1416 brp_list[brp_2].control);
1417 if (retval != ERROR_OK)
1418 return retval;
1419
1420 return ERROR_OK;
1421 }
1422
/*
 * Remove a previously set breakpoint.
 *
 * For hardware breakpoints the BRP bookkeeping entry is released and the
 * corresponding BVR/BCR debug registers are cleared (hybrid breakpoints
 * release both the context BRP and its linked IVA BRP). For software
 * breakpoints the saved original instruction bytes are written back and
 * the caches are made coherent again.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* both address and asid non-zero means this is a hybrid breakpoint */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;	/* context BRP (set stores index + 1) */
			int brp_j = breakpoint->linked_BRP;	/* linked IVA BRP */
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* release the context BRP and zero its debug registers */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			/* NOTE(review): the high word is written with the truncated
			 * low word rather than (value >> 32); harmless here because
			 * value was just zeroed, but looks unintentional — confirm */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			/* release the linked IVA BRP and zero its debug registers */
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: release the single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* keep D-cache and I-cache coherent with the restored memory */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1555
1556 static int aarch64_add_breakpoint(struct target *target,
1557 struct breakpoint *breakpoint)
1558 {
1559 struct aarch64_common *aarch64 = target_to_aarch64(target);
1560
1561 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1562 LOG_INFO("no hardware breakpoint available");
1563 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1564 }
1565
1566 if (breakpoint->type == BKPT_HARD)
1567 aarch64->brp_num_available--;
1568
1569 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1570 }
1571
1572 static int aarch64_add_context_breakpoint(struct target *target,
1573 struct breakpoint *breakpoint)
1574 {
1575 struct aarch64_common *aarch64 = target_to_aarch64(target);
1576
1577 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1578 LOG_INFO("no hardware breakpoint available");
1579 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1580 }
1581
1582 if (breakpoint->type == BKPT_HARD)
1583 aarch64->brp_num_available--;
1584
1585 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1586 }
1587
1588 static int aarch64_add_hybrid_breakpoint(struct target *target,
1589 struct breakpoint *breakpoint)
1590 {
1591 struct aarch64_common *aarch64 = target_to_aarch64(target);
1592
1593 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1594 LOG_INFO("no hardware breakpoint available");
1595 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1596 }
1597
1598 if (breakpoint->type == BKPT_HARD)
1599 aarch64->brp_num_available--;
1600
1601 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1602 }
1603
1604
1605 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1606 {
1607 struct aarch64_common *aarch64 = target_to_aarch64(target);
1608
1609 #if 0
1610 /* It is perfectly possible to remove breakpoints while the target is running */
1611 if (target->state != TARGET_HALTED) {
1612 LOG_WARNING("target not halted");
1613 return ERROR_TARGET_NOT_HALTED;
1614 }
1615 #endif
1616
1617 if (breakpoint->set) {
1618 aarch64_unset_breakpoint(target, breakpoint);
1619 if (breakpoint->type == BKPT_HARD)
1620 aarch64->brp_num_available++;
1621 }
1622
1623 return ERROR_OK;
1624 }
1625
/*
 * AArch64 reset functions
 */
1629
/*
 * Assert reset on the target: either run the user-supplied
 * TARGET_EVENT_RESET_ASSERT handler or pulse SRST via JTAG.
 * Invalidates the register caches and marks the target as in reset.
 */
static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		jtag_add_reset(0, 1);
	} else {
		/* no event handler and no SRST line: nothing we can do */
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target)) {
		register_cache_invalidate(armv8->arm.core_cache);
		register_cache_invalidate(armv8->arm.core_cache->next);
	}

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1661
1662 static int aarch64_deassert_reset(struct target *target)
1663 {
1664 int retval;
1665
1666 LOG_DEBUG(" ");
1667
1668 /* be certain SRST is off */
1669 jtag_add_reset(0, 0);
1670
1671 if (!target_was_examined(target))
1672 return ERROR_OK;
1673
1674 retval = aarch64_poll(target);
1675 if (retval != ERROR_OK)
1676 return retval;
1677
1678 if (target->reset_halt) {
1679 if (target->state != TARGET_HALTED) {
1680 LOG_WARNING("%s: ran after reset and before halt ...",
1681 target_name(target));
1682 retval = target_halt(target);
1683 if (retval != ERROR_OK)
1684 return retval;
1685 }
1686 }
1687
1688 return aarch64_init_debug_access(target);
1689 }
1690
/*
 * Slow-path CPU memory write: one element per DCC round trip.
 *
 * For each element the data is placed in DTRRX, moved into core register
 * R1/X1 by an MRS/MRC issued through the DPM, and stored to memory with a
 * post-indexed store through the address register. Caller has already
 * loaded the target address into R0/X0. Used for sizes/alignments the
 * fast memory-mode path cannot handle.
 */
static int aarch64_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

	/* R1/X1 is clobbered as the data transfer register */
	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode if necessary */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t data, opcode;

		/* write the data to store into DTRRX */
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* move DTRRX into R1/X1 (AArch64 vs AArch32 encoding) */
		if (arm->core_state == ARM_STATE_AARCH64)
			retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		/* store R1/X1 to [address], post-incrementing the pointer */
		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
1749
1750 static int aarch64_write_cpu_memory_fast(struct target *target,
1751 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1752 {
1753 struct armv8_common *armv8 = target_to_armv8(target);
1754 struct arm *arm = &armv8->arm;
1755 int retval;
1756
1757 armv8_reg_current(arm, 1)->dirty = true;
1758
1759 /* Step 1.d - Change DCC to memory mode */
1760 *dscr |= DSCR_MA;
1761 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1762 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1763 if (retval != ERROR_OK)
1764 return retval;
1765
1766
1767 /* Step 2.a - Do the write */
1768 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1769 buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
1770 if (retval != ERROR_OK)
1771 return retval;
1772
1773 /* Step 3.a - Switch DTR mode back to Normal mode */
1774 *dscr &= ~DSCR_MA;
1775 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1776 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1777 if (retval != ERROR_OK)
1778 return retval;
1779
1780 return ERROR_OK;
1781 }
1782
/*
 * Write target memory through the CPU's debug port (APB-AP).
 *
 * Loads the destination address into R0/X0, then dispatches to the fast
 * memory-mode path (aligned 32-bit words) or the slow per-element path.
 * Sticky abort flags in DSCR are checked afterwards; on abort the DPM
 * exception handler is invoked and ERROR_FAIL is returned.
 */
static int aarch64_write_cpu_memory(struct target *target,
	uint64_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* write memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	/* NOTE(review): the result of this write is overwritten below
	 * without being checked — confirm whether that is intentional */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRX */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		/* NOTE(review): return value ignored here, unlike the
		 * AArch64 branch above — confirm */
		dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	/* fast path only for naturally aligned 32-bit word transfers */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);

	if (retval != ERROR_OK) {
		/* Unset DTR mode */
		mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
1864
1865 static int aarch64_read_cpu_memory_slow(struct target *target,
1866 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
1867 {
1868 struct armv8_common *armv8 = target_to_armv8(target);
1869 struct arm_dpm *dpm = &armv8->dpm;
1870 struct arm *arm = &armv8->arm;
1871 int retval;
1872
1873 armv8_reg_current(arm, 1)->dirty = true;
1874
1875 /* change DCC to normal mode (if necessary) */
1876 if (*dscr & DSCR_MA) {
1877 *dscr &= DSCR_MA;
1878 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1879 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1880 if (retval != ERROR_OK)
1881 return retval;
1882 }
1883
1884 while (count) {
1885 uint32_t opcode, data;
1886
1887 if (size == 1)
1888 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
1889 else if (size == 2)
1890 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
1891 else
1892 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
1893 retval = dpm->instr_execute(dpm, opcode);
1894 if (retval != ERROR_OK)
1895 return retval;
1896
1897 if (arm->core_state == ARM_STATE_AARCH64)
1898 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
1899 else
1900 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1901 if (retval != ERROR_OK)
1902 return retval;
1903
1904 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1905 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
1906 if (retval != ERROR_OK)
1907 return retval;
1908
1909 if (size == 1)
1910 *buffer = (uint8_t)data;
1911 else if (size == 2)
1912 target_buffer_set_u16(target, buffer, (uint16_t)data);
1913 else
1914 target_buffer_set_u32(target, buffer, data);
1915
1916 /* Advance */
1917 buffer += size;
1918 --count;
1919 }
1920
1921 return ERROR_OK;
1922 }
1923
/*
 * Fast-path CPU memory read for aligned 32-bit words.
 *
 * Switches the DCC into memory-access mode: every read of DTRTX makes the
 * core load the next word from [X0] and auto-increment X0. The first read
 * is a dummy (discarded), the bulk is streamed with a non-incrementing AP
 * transfer, and the final word is read after returning to normal mode.
 */
static int aarch64_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t value;

	/* Mark X1 as dirty */
	armv8_reg_current(arm, 1)->dirty = true;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
	} else {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
	}

	/* Step 1.e - Change DCC to memory mode */
	/* NOTE(review): retval from the dummy op and from the two accesses
	 * below is overwritten without being checked — confirm intent */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	/* Step 1.f - read DBGDTRTX and discard the value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);

	count--;
	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	if (count) {
		/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
		 * increments X0 by 4. */
		retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
									armv8->debug_base + CPUV8_DBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Step 3.a - set DTR access mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	/* last word goes after the streamed portion of the buffer */
	target_buffer_set_u32(target, buffer + count * 4, value);
	return retval;
}
1984
/*
 * Read target memory through the CPU's debug port (APB-AP).
 *
 * Loads the source address into R0/X0, then dispatches to the fast
 * memory-mode path (aligned 32-bit words) or the slow per-element path.
 * Sticky abort flags in DSCR are checked afterwards; on abort the DPM
 * exception handler is invoked and ERROR_FAIL is returned.
 */
static int aarch64_read_cpu_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
			address, size, count);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr &= ~DSCR_MA;
	/* NOTE(review): error codes are accumulated with "+=" through the
	 * setup sequence and only the final dispatch result is kept — the
	 * individual setup failures are effectively discarded; confirm */
	retval += mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval += dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval += dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	/* fast path only for naturally aligned 32-bit word transfers */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);

	/* make sure the DCC is back in normal mode before bailing out */
	if (dscr & DSCR_MA) {
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval != ERROR_OK)
		return retval;

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2068
2069 static int aarch64_read_phys_memory(struct target *target,
2070 target_addr_t address, uint32_t size,
2071 uint32_t count, uint8_t *buffer)
2072 {
2073 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2074
2075 if (count && buffer) {
2076 /* read memory through APB-AP */
2077 retval = aarch64_mmu_modify(target, 0);
2078 if (retval != ERROR_OK)
2079 return retval;
2080 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2081 }
2082 return retval;
2083 }
2084
2085 static int aarch64_read_memory(struct target *target, target_addr_t address,
2086 uint32_t size, uint32_t count, uint8_t *buffer)
2087 {
2088 int mmu_enabled = 0;
2089 int retval;
2090
2091 /* determine if MMU was enabled on target stop */
2092 retval = aarch64_mmu(target, &mmu_enabled);
2093 if (retval != ERROR_OK)
2094 return retval;
2095
2096 if (mmu_enabled) {
2097 /* enable MMU as we could have disabled it for phys access */
2098 retval = aarch64_mmu_modify(target, 1);
2099 if (retval != ERROR_OK)
2100 return retval;
2101 }
2102 return aarch64_read_cpu_memory(target, address, size, count, buffer);
2103 }
2104
2105 static int aarch64_write_phys_memory(struct target *target,
2106 target_addr_t address, uint32_t size,
2107 uint32_t count, const uint8_t *buffer)
2108 {
2109 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2110
2111 if (count && buffer) {
2112 /* write memory through APB-AP */
2113 retval = aarch64_mmu_modify(target, 0);
2114 if (retval != ERROR_OK)
2115 return retval;
2116 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2117 }
2118
2119 return retval;
2120 }
2121
2122 static int aarch64_write_memory(struct target *target, target_addr_t address,
2123 uint32_t size, uint32_t count, const uint8_t *buffer)
2124 {
2125 int mmu_enabled = 0;
2126 int retval;
2127
2128 /* determine if MMU was enabled on target stop */
2129 retval = aarch64_mmu(target, &mmu_enabled);
2130 if (retval != ERROR_OK)
2131 return retval;
2132
2133 if (mmu_enabled) {
2134 /* enable MMU as we could have disabled it for phys access */
2135 retval = aarch64_mmu_modify(target, 1);
2136 if (retval != ERROR_OK)
2137 return retval;
2138 }
2139 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2140 }
2141
2142 static int aarch64_handle_target_request(void *priv)
2143 {
2144 struct target *target = priv;
2145 struct armv8_common *armv8 = target_to_armv8(target);
2146 int retval;
2147
2148 if (!target_was_examined(target))
2149 return ERROR_OK;
2150 if (!target->dbg_msg_enabled)
2151 return ERROR_OK;
2152
2153 if (target->state == TARGET_RUNNING) {
2154 uint32_t request;
2155 uint32_t dscr;
2156 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2157 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2158
2159 /* check if we have data */
2160 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2161 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2162 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2163 if (retval == ERROR_OK) {
2164 target_request(target, request);
2165 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2166 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2167 }
2168 }
2169 }
2170
2171 return ERROR_OK;
2172 }
2173
2174 static int aarch64_examine_first(struct target *target)
2175 {
2176 struct aarch64_common *aarch64 = target_to_aarch64(target);
2177 struct armv8_common *armv8 = &aarch64->armv8_common;
2178 struct adiv5_dap *swjdp = armv8->arm.dap;
2179 uint32_t cti_base;
2180 int i;
2181 int retval = ERROR_OK;
2182 uint64_t debug, ttypr;
2183 uint32_t cpuid;
2184 uint32_t tmp0, tmp1, tmp2, tmp3;
2185 debug = ttypr = cpuid = 0;
2186
2187 retval = dap_dp_init(swjdp);
2188 if (retval != ERROR_OK)
2189 return retval;
2190
2191 /* Search for the APB-AB - it is needed for access to debug registers */
2192 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2193 if (retval != ERROR_OK) {
2194 LOG_ERROR("Could not find APB-AP for debug access");
2195 return retval;
2196 }
2197
2198 retval = mem_ap_init(armv8->debug_ap);
2199 if (retval != ERROR_OK) {
2200 LOG_ERROR("Could not initialize the APB-AP");
2201 return retval;
2202 }
2203
2204 armv8->debug_ap->memaccess_tck = 10;
2205
2206 if (!target->dbgbase_set) {
2207 uint32_t dbgbase;
2208 /* Get ROM Table base */
2209 uint32_t apid;
2210 int32_t coreidx = target->coreid;
2211 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2212 if (retval != ERROR_OK)
2213 return retval;
2214 /* Lookup 0x15 -- Processor DAP */
2215 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2216 &armv8->debug_base, &coreidx);
2217 if (retval != ERROR_OK)
2218 return retval;
2219 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
2220 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2221 } else
2222 armv8->debug_base = target->dbgbase;
2223
2224 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2225 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2226 if (retval != ERROR_OK) {
2227 LOG_DEBUG("Examine %s failed", "oslock");
2228 return retval;
2229 }
2230
2231 retval = mem_ap_read_u32(armv8->debug_ap,
2232 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2233 if (retval != ERROR_OK) {
2234 LOG_DEBUG("Examine %s failed", "CPUID");
2235 return retval;
2236 }
2237
2238 retval = mem_ap_read_u32(armv8->debug_ap,
2239 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2240 retval += mem_ap_read_u32(armv8->debug_ap,
2241 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2242 if (retval != ERROR_OK) {
2243 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2244 return retval;
2245 }
2246 retval = mem_ap_read_u32(armv8->debug_ap,
2247 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2248 retval += mem_ap_read_u32(armv8->debug_ap,
2249 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2250 if (retval != ERROR_OK) {
2251 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2252 return retval;
2253 }
2254
2255 retval = dap_run(armv8->debug_ap->dap);
2256 if (retval != ERROR_OK) {
2257 LOG_ERROR("%s: examination failed\n", target_name(target));
2258 return retval;
2259 }
2260
2261 ttypr |= tmp1;
2262 ttypr = (ttypr << 32) | tmp0;
2263 debug |= tmp3;
2264 debug = (debug << 32) | tmp2;
2265
2266 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2267 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2268 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2269
2270 if (target->ctibase == 0) {
2271 /* assume a v8 rom table layout */
2272 cti_base = armv8->debug_base + 0x10000;
2273 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, cti_base);
2274 } else
2275 cti_base = target->ctibase;
2276
2277 armv8->cti = arm_cti_create(armv8->debug_ap, cti_base);
2278 if (armv8->cti == NULL)
2279 return ERROR_FAIL;
2280
2281 retval = aarch64_dpm_setup(aarch64, debug);
2282 if (retval != ERROR_OK)
2283 return retval;
2284
2285 /* Setup Breakpoint Register Pairs */
2286 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2287 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2288 aarch64->brp_num_available = aarch64->brp_num;
2289 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2290 for (i = 0; i < aarch64->brp_num; i++) {
2291 aarch64->brp_list[i].used = 0;
2292 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2293 aarch64->brp_list[i].type = BRP_NORMAL;
2294 else
2295 aarch64->brp_list[i].type = BRP_CONTEXT;
2296 aarch64->brp_list[i].value = 0;
2297 aarch64->brp_list[i].control = 0;
2298 aarch64->brp_list[i].BRPn = i;
2299 }
2300
2301 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2302
2303 target->state = TARGET_UNKNOWN;
2304 target->debug_reason = DBG_REASON_NOTHALTED;
2305 aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2306 target_set_examined(target);
2307 return ERROR_OK;
2308 }
2309
2310 static int aarch64_examine(struct target *target)
2311 {
2312 int retval = ERROR_OK;
2313
2314 /* don't re-probe hardware after each reset */
2315 if (!target_was_examined(target))
2316 retval = aarch64_examine_first(target);
2317
2318 /* Configure core debug access */
2319 if (retval == ERROR_OK)
2320 retval = aarch64_init_debug_access(target);
2321
2322 return retval;
2323 }
2324
2325 /*
2326 * Cortex-A8 target creation and initialization
2327 */
2328
2329 static int aarch64_init_target(struct command_context *cmd_ctx,
2330 struct target *target)
2331 {
2332 /* examine_first() does a bunch of this */
2333 return ERROR_OK;
2334 }
2335
2336 static int aarch64_init_arch_info(struct target *target,
2337 struct aarch64_common *aarch64, struct jtag_tap *tap)
2338 {
2339 struct armv8_common *armv8 = &aarch64->armv8_common;
2340
2341 /* Setup struct aarch64_common */
2342 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2343 /* tap has no dap initialized */
2344 if (!tap->dap) {
2345 tap->dap = dap_init();
2346 tap->dap->tap = tap;
2347 }
2348 armv8->arm.dap = tap->dap;
2349
2350 /* register arch-specific functions */
2351 armv8->examine_debug_reason = NULL;
2352 armv8->post_debug_entry = aarch64_post_debug_entry;
2353 armv8->pre_restore_context = NULL;
2354 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2355
2356 armv8_init_arch_info(target, armv8);
2357 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2358
2359 return ERROR_OK;
2360 }
2361
2362 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2363 {
2364 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2365
2366 return aarch64_init_arch_info(target, aarch64, target->tap);
2367 }
2368
2369 static int aarch64_mmu(struct target *target, int *enabled)
2370 {
2371 if (target->state != TARGET_HALTED) {
2372 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2373 return ERROR_TARGET_INVALID;
2374 }
2375
2376 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2377 return ERROR_OK;
2378 }
2379
2380 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2381 target_addr_t *phys)
2382 {
2383 return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2384 }
2385
2386 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2387 {
2388 struct target *target = get_current_target(CMD_CTX);
2389 struct armv8_common *armv8 = target_to_armv8(target);
2390
2391 return armv8_handle_cache_info_command(CMD_CTX,
2392 &armv8->armv8_mmu.armv8_cache);
2393 }
2394
2395
2396 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2397 {
2398 struct target *target = get_current_target(CMD_CTX);
2399 if (!target_was_examined(target)) {
2400 LOG_ERROR("target not examined yet");
2401 return ERROR_FAIL;
2402 }
2403
2404 return aarch64_init_debug_access(target);
2405 }
2406 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2407 {
2408 struct target *target = get_current_target(CMD_CTX);
2409 /* check target is an smp target */
2410 struct target_list *head;
2411 struct target *curr;
2412 head = target->head;
2413 target->smp = 0;
2414 if (head != (struct target_list *)NULL) {
2415 while (head != (struct target_list *)NULL) {
2416 curr = head->target;
2417 curr->smp = 0;
2418 head = head->next;
2419 }
2420 /* fixes the target display to the debugger */
2421 target->gdb_service->target = target;
2422 }
2423 return ERROR_OK;
2424 }
2425
2426 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2427 {
2428 struct target *target = get_current_target(CMD_CTX);
2429 struct target_list *head;
2430 struct target *curr;
2431 head = target->head;
2432 if (head != (struct target_list *)NULL) {
2433 target->smp = 1;
2434 while (head != (struct target_list *)NULL) {
2435 curr = head->target;
2436 curr->smp = 1;
2437 head = head->next;
2438 }
2439 }
2440 return ERROR_OK;
2441 }
2442
2443 COMMAND_HANDLER(aarch64_mask_interrupts_command)
2444 {
2445 struct target *target = get_current_target(CMD_CTX);
2446 struct aarch64_common *aarch64 = target_to_aarch64(target);
2447
2448 static const Jim_Nvp nvp_maskisr_modes[] = {
2449 { .name = "off", .value = AARCH64_ISRMASK_OFF },
2450 { .name = "on", .value = AARCH64_ISRMASK_ON },
2451 { .name = NULL, .value = -1 },
2452 };
2453 const Jim_Nvp *n;
2454
2455 if (CMD_ARGC > 0) {
2456 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2457 if (n->name == NULL) {
2458 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2459 return ERROR_COMMAND_SYNTAX_ERROR;
2460 }
2461
2462 aarch64->isrmasking_mode = n->value;
2463 }
2464
2465 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
2466 command_print(CMD_CTX, "aarch64 interrupt mask %s", n->name);
2467
2468 return ERROR_OK;
2469 }
2470
2471 static const struct command_registration aarch64_exec_command_handlers[] = {
2472 {
2473 .name = "cache_info",
2474 .handler = aarch64_handle_cache_info_command,
2475 .mode = COMMAND_EXEC,
2476 .help = "display information about target caches",
2477 .usage = "",
2478 },
2479 {
2480 .name = "dbginit",
2481 .handler = aarch64_handle_dbginit_command,
2482 .mode = COMMAND_EXEC,
2483 .help = "Initialize core debug",
2484 .usage = "",
2485 },
2486 { .name = "smp_off",
2487 .handler = aarch64_handle_smp_off_command,
2488 .mode = COMMAND_EXEC,
2489 .help = "Stop smp handling",
2490 .usage = "",
2491 },
2492 {
2493 .name = "smp_on",
2494 .handler = aarch64_handle_smp_on_command,
2495 .mode = COMMAND_EXEC,
2496 .help = "Restart smp handling",
2497 .usage = "",
2498 },
2499 {
2500 .name = "maskisr",
2501 .handler = aarch64_mask_interrupts_command,
2502 .mode = COMMAND_ANY,
2503 .help = "mask aarch64 interrupts during single-step",
2504 .usage = "['on'|'off']",
2505 },
2506
2507 COMMAND_REGISTRATION_DONE
2508 };
2509 static const struct command_registration aarch64_command_handlers[] = {
2510 {
2511 .chain = armv8_command_handlers,
2512 },
2513 {
2514 .name = "aarch64",
2515 .mode = COMMAND_ANY,
2516 .help = "Aarch64 command group",
2517 .usage = "",
2518 .chain = aarch64_exec_command_handlers,
2519 },
2520 COMMAND_REGISTRATION_DONE
2521 };
2522
2523 struct target_type aarch64_target = {
2524 .name = "aarch64",
2525
2526 .poll = aarch64_poll,
2527 .arch_state = armv8_arch_state,
2528
2529 .halt = aarch64_halt,
2530 .resume = aarch64_resume,
2531 .step = aarch64_step,
2532
2533 .assert_reset = aarch64_assert_reset,
2534 .deassert_reset = aarch64_deassert_reset,
2535
2536 /* REVISIT allow exporting VFP3 registers ... */
2537 .get_gdb_reg_list = armv8_get_gdb_reg_list,
2538
2539 .read_memory = aarch64_read_memory,
2540 .write_memory = aarch64_write_memory,
2541
2542 .add_breakpoint = aarch64_add_breakpoint,
2543 .add_context_breakpoint = aarch64_add_context_breakpoint,
2544 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2545 .remove_breakpoint = aarch64_remove_breakpoint,
2546 .add_watchpoint = NULL,
2547 .remove_watchpoint = NULL,
2548
2549 .commands = aarch64_command_handlers,
2550 .target_create = aarch64_target_create,
2551 .init_target = aarch64_init_target,
2552 .examine = aarch64_examine,
2553
2554 .read_phys_memory = aarch64_read_phys_memory,
2555 .write_phys_memory = aarch64_write_phys_memory,
2556 .mmu = aarch64_mmu,
2557 .virt2phys = aarch64_virt2phys,
2558 };

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)