Aarch64: Switch to EL1 from EL0 before manipulating the MMU
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "a64_disassembler.h"
27 #include "register.h"
28 #include "target_request.h"
29 #include "target_type.h"
30 #include "armv8_opcodes.h"
31 #include "armv8_cache.h"
32 #include "arm_semihosting.h"
33 #include "jtag/interface.h"
34 #include "smp.h"
35 #include <helper/time_support.h>
36
/* How a PE is restarted after leaving debug state */
enum restart_mode {
	RESTART_LAZY,	/* issue the restart request but do not wait for it */
	RESTART_SYNC,	/* wait until the PE actually restarted (PRSR.SDR) */
};

/* How a PE is halted */
enum halt_mode {
	HALT_LAZY,	/* issue the halt request but do not wait for it */
	HALT_SYNC,	/* wait until the PE reports halted (PRSR.HALT) */
};

/* Per-target configuration: ADIv5 DAP settings plus the CTI instance */
struct aarch64_private_config {
	struct adiv5_private_config adiv5_config;
	struct arm_cti *cti;
};
51
52 static int aarch64_poll(struct target *target);
53 static int aarch64_debug_entry(struct target *target);
54 static int aarch64_restore_context(struct target *target, bool bpwp);
55 static int aarch64_set_breakpoint(struct target *target,
56 struct breakpoint *breakpoint, uint8_t matchmode);
57 static int aarch64_set_context_breakpoint(struct target *target,
58 struct breakpoint *breakpoint, uint8_t matchmode);
59 static int aarch64_set_hybrid_breakpoint(struct target *target,
60 struct breakpoint *breakpoint);
61 static int aarch64_unset_breakpoint(struct target *target,
62 struct breakpoint *breakpoint);
63 static int aarch64_mmu(struct target *target, int *enabled);
64 static int aarch64_virt2phys(struct target *target,
65 target_addr_t virt, target_addr_t *phys);
66 static int aarch64_read_cpu_memory(struct target *target,
67 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
68
69 static int aarch64_restore_system_control_reg(struct target *target)
70 {
71 enum arm_mode target_mode = ARM_MODE_ANY;
72 int retval = ERROR_OK;
73 uint32_t instr;
74
75 struct aarch64_common *aarch64 = target_to_aarch64(target);
76 struct armv8_common *armv8 = target_to_armv8(target);
77
78 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
79 aarch64->system_control_reg_curr = aarch64->system_control_reg;
80 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
81
82 switch (armv8->arm.core_mode) {
83 case ARMV8_64_EL0T:
84 target_mode = ARMV8_64_EL1H;
85 /* fall through */
86 case ARMV8_64_EL1T:
87 case ARMV8_64_EL1H:
88 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
89 break;
90 case ARMV8_64_EL2T:
91 case ARMV8_64_EL2H:
92 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
93 break;
94 case ARMV8_64_EL3H:
95 case ARMV8_64_EL3T:
96 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
97 break;
98
99 case ARM_MODE_SVC:
100 case ARM_MODE_ABT:
101 case ARM_MODE_FIQ:
102 case ARM_MODE_IRQ:
103 case ARM_MODE_HYP:
104 case ARM_MODE_SYS:
105 instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
106 break;
107
108 default:
109 LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
110 armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
111 return ERROR_FAIL;
112 }
113
114 if (target_mode != ARM_MODE_ANY)
115 armv8_dpm_modeswitch(&armv8->dpm, target_mode);
116
117 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
118 if (retval != ERROR_OK)
119 return retval;
120
121 if (target_mode != ARM_MODE_ANY)
122 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
123 }
124
125 return retval;
126 }
127
128 /* modify system_control_reg in order to enable or disable mmu for :
129 * - virt2phys address conversion
130 * - read or write memory in phys or virt address */
131 static int aarch64_mmu_modify(struct target *target, int enable)
132 {
133 struct aarch64_common *aarch64 = target_to_aarch64(target);
134 struct armv8_common *armv8 = &aarch64->armv8_common;
135 int retval = ERROR_OK;
136 enum arm_mode target_mode = ARM_MODE_ANY;
137 uint32_t instr = 0;
138
139 if (enable) {
140 /* if mmu enabled at target stop and mmu not enable */
141 if (!(aarch64->system_control_reg & 0x1U)) {
142 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
143 return ERROR_FAIL;
144 }
145 if (!(aarch64->system_control_reg_curr & 0x1U))
146 aarch64->system_control_reg_curr |= 0x1U;
147 } else {
148 if (aarch64->system_control_reg_curr & 0x4U) {
149 /* data cache is active */
150 aarch64->system_control_reg_curr &= ~0x4U;
151 /* flush data cache armv8 function to be called */
152 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
153 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
154 }
155 if ((aarch64->system_control_reg_curr & 0x1U)) {
156 aarch64->system_control_reg_curr &= ~0x1U;
157 }
158 }
159
160 switch (armv8->arm.core_mode) {
161 case ARMV8_64_EL0T:
162 target_mode = ARMV8_64_EL1H;
163 /* fall through */
164 case ARMV8_64_EL1T:
165 case ARMV8_64_EL1H:
166 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
167 break;
168 case ARMV8_64_EL2T:
169 case ARMV8_64_EL2H:
170 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
171 break;
172 case ARMV8_64_EL3H:
173 case ARMV8_64_EL3T:
174 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
175 break;
176
177 case ARM_MODE_SVC:
178 case ARM_MODE_ABT:
179 case ARM_MODE_FIQ:
180 case ARM_MODE_IRQ:
181 case ARM_MODE_HYP:
182 case ARM_MODE_SYS:
183 instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
184 break;
185
186 default:
187 LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_mode);
188 break;
189 }
190 if (target_mode != ARM_MODE_ANY)
191 armv8_dpm_modeswitch(&armv8->dpm, target_mode);
192
193 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
194 aarch64->system_control_reg_curr);
195
196 if (target_mode != ARM_MODE_ANY)
197 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
198
199 return retval;
200 }
201
202 /*
203 * Basic debug access, very low level assumes state is saved
204 */
205 static int aarch64_init_debug_access(struct target *target)
206 {
207 struct armv8_common *armv8 = target_to_armv8(target);
208 int retval;
209 uint32_t dummy;
210
211 LOG_DEBUG("%s", target_name(target));
212
213 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
214 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
215 if (retval != ERROR_OK) {
216 LOG_DEBUG("Examine %s failed", "oslock");
217 return retval;
218 }
219
220 /* Clear Sticky Power Down status Bit in PRSR to enable access to
221 the registers in the Core Power Domain */
222 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
223 armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
224 if (retval != ERROR_OK)
225 return retval;
226
227 /*
228 * Static CTI configuration:
229 * Channel 0 -> trigger outputs HALT request to PE
230 * Channel 1 -> trigger outputs Resume request to PE
231 * Gate all channel trigger events from entering the CTM
232 */
233
234 /* Enable CTI */
235 retval = arm_cti_enable(armv8->cti, true);
236 /* By default, gate all channel events to and from the CTM */
237 if (retval == ERROR_OK)
238 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
239 /* output halt requests to PE on channel 0 event */
240 if (retval == ERROR_OK)
241 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
242 /* output restart requests to PE on channel 1 event */
243 if (retval == ERROR_OK)
244 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
245 if (retval != ERROR_OK)
246 return retval;
247
248 /* Resync breakpoint registers */
249
250 return ERROR_OK;
251 }
252
253 /* Write to memory mapped registers directly with no cache or mmu handling */
254 static int aarch64_dap_write_memap_register_u32(struct target *target,
255 uint32_t address,
256 uint32_t value)
257 {
258 int retval;
259 struct armv8_common *armv8 = target_to_armv8(target);
260
261 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
262
263 return retval;
264 }
265
266 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
267 {
268 struct arm_dpm *dpm = &a8->armv8_common.dpm;
269 int retval;
270
271 dpm->arm = &a8->armv8_common.arm;
272 dpm->didr = debug;
273
274 retval = armv8_dpm_setup(dpm);
275 if (retval == ERROR_OK)
276 retval = armv8_dpm_initialize(dpm);
277
278 return retval;
279 }
280
281 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
282 {
283 struct armv8_common *armv8 = target_to_armv8(target);
284 return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
285 }
286
287 static int aarch64_check_state_one(struct target *target,
288 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
289 {
290 struct armv8_common *armv8 = target_to_armv8(target);
291 uint32_t prsr;
292 int retval;
293
294 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
295 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
296 if (retval != ERROR_OK)
297 return retval;
298
299 if (p_prsr)
300 *p_prsr = prsr;
301
302 if (p_result)
303 *p_result = (prsr & mask) == (val & mask);
304
305 return ERROR_OK;
306 }
307
308 static int aarch64_wait_halt_one(struct target *target)
309 {
310 int retval = ERROR_OK;
311 uint32_t prsr;
312
313 int64_t then = timeval_ms();
314 for (;;) {
315 int halted;
316
317 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
318 if (retval != ERROR_OK || halted)
319 break;
320
321 if (timeval_ms() > then + 1000) {
322 retval = ERROR_TARGET_TIMEOUT;
323 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
324 break;
325 }
326 }
327 return retval;
328 }
329
330 static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
331 {
332 int retval = ERROR_OK;
333 struct target_list *head = target->head;
334 struct target *first = NULL;
335
336 LOG_DEBUG("target %s exc %i", target_name(target), exc_target);
337
338 while (head != NULL) {
339 struct target *curr = head->target;
340 struct armv8_common *armv8 = target_to_armv8(curr);
341 head = head->next;
342
343 if (exc_target && curr == target)
344 continue;
345 if (!target_was_examined(curr))
346 continue;
347 if (curr->state != TARGET_RUNNING)
348 continue;
349
350 /* HACK: mark this target as prepared for halting */
351 curr->debug_reason = DBG_REASON_DBGRQ;
352
353 /* open the gate for channel 0 to let HALT requests pass to the CTM */
354 retval = arm_cti_ungate_channel(armv8->cti, 0);
355 if (retval == ERROR_OK)
356 retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
357 if (retval != ERROR_OK)
358 break;
359
360 LOG_DEBUG("target %s prepared", target_name(curr));
361
362 if (first == NULL)
363 first = curr;
364 }
365
366 if (p_first) {
367 if (exc_target && first)
368 *p_first = first;
369 else
370 *p_first = target;
371 }
372
373 return retval;
374 }
375
376 static int aarch64_halt_one(struct target *target, enum halt_mode mode)
377 {
378 int retval = ERROR_OK;
379 struct armv8_common *armv8 = target_to_armv8(target);
380
381 LOG_DEBUG("%s", target_name(target));
382
383 /* allow Halting Debug Mode */
384 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
385 if (retval != ERROR_OK)
386 return retval;
387
388 /* trigger an event on channel 0, this outputs a halt request to the PE */
389 retval = arm_cti_pulse_channel(armv8->cti, 0);
390 if (retval != ERROR_OK)
391 return retval;
392
393 if (mode == HALT_SYNC) {
394 retval = aarch64_wait_halt_one(target);
395 if (retval != ERROR_OK) {
396 if (retval == ERROR_TARGET_TIMEOUT)
397 LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
398 return retval;
399 }
400 }
401
402 return ERROR_OK;
403 }
404
/*
 * Halt all PEs of an SMP group.
 *
 * Prepares every running PE for halting, triggers a halt on one PE and
 * then waits up to 1 s for all examined PEs to report PRSR.HALT.
 *
 * @param target     any member of the SMP group
 * @param exc_target if true, the calling target itself is excluded
 * @return ERROR_OK, ERROR_TARGET_TIMEOUT, or a lower-layer error
 */
static int aarch64_halt_smp(struct target *target, bool exc_target)
{
	struct target *next = target;
	int retval;

	/* prepare halt on all PEs of the group */
	retval = aarch64_prepare_halt_smp(target, exc_target, &next);

	/* nothing else to halt in the group */
	if (exc_target && next == target)
		return retval;

	/* halt the target PE */
	if (retval == ERROR_OK)
		retval = aarch64_halt_one(next, HALT_LAZY);

	if (retval != ERROR_OK)
		return retval;

	/* wait for all PEs to halt */
	int64_t then = timeval_ms();
	for (;;) {
		bool all_halted = true;
		struct target_list *head;
		struct target *curr;

		foreach_smp_target(head, target->head) {
			int halted;

			curr = head->target;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
			if (retval != ERROR_OK || !halted) {
				all_halted = false;
				break;
			}
		}

		if (all_halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}

		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit halt for the second cluster.
		 */
		/* note: curr still points at the PE that failed the check above */
		retval = aarch64_halt_one(curr, HALT_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
467
468 static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
469 {
470 struct target *gdb_target = NULL;
471 struct target_list *head;
472 struct target *curr;
473
474 if (debug_reason == DBG_REASON_NOTHALTED) {
475 LOG_DEBUG("Halting remaining targets in SMP group");
476 aarch64_halt_smp(target, true);
477 }
478
479 /* poll all targets in the group, but skip the target that serves GDB */
480 foreach_smp_target(head, target->head) {
481 curr = head->target;
482 /* skip calling context */
483 if (curr == target)
484 continue;
485 if (!target_was_examined(curr))
486 continue;
487 /* skip targets that were already halted */
488 if (curr->state == TARGET_HALTED)
489 continue;
490 /* remember the gdb_service->target */
491 if (curr->gdb_service != NULL)
492 gdb_target = curr->gdb_service->target;
493 /* skip it */
494 if (curr == gdb_target)
495 continue;
496
497 /* avoid recursion in aarch64_poll() */
498 curr->smp = 0;
499 aarch64_poll(curr);
500 curr->smp = 1;
501 }
502
503 /* after all targets were updated, poll the gdb serving target */
504 if (gdb_target != NULL && gdb_target != target)
505 aarch64_poll(gdb_target);
506
507 return ERROR_OK;
508 }
509
510 /*
511 * Aarch64 Run control
512 */
513
/*
 * Poll the PE for a halting debug event (PRSR.HALT) and update the
 * OpenOCD target state machine.
 *
 * On a fresh halt: saves the debug context via aarch64_debug_entry(),
 * updates the rest of the SMP group, gives semihosting a chance to
 * handle (and possibly resume) the target, and finally fires the
 * appropriate HALTED event callbacks.
 */
static int aarch64_poll(struct target *target)
{
	enum target_state prev_target_state;
	int retval = ERROR_OK;
	int halted;

	retval = aarch64_check_state_one(target,
				PRSR_HALT, PRSR_HALT, &halted, NULL);
	if (retval != ERROR_OK)
		return retval;

	if (halted) {
		prev_target_state = target->state;
		if (prev_target_state != TARGET_HALTED) {
			/* remember the reason before debug entry may change it */
			enum target_debug_reason debug_reason = target->debug_reason;

			/* We have a halting debug event */
			target->state = TARGET_HALTED;
			LOG_DEBUG("Target %s halted", target_name(target));
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp)
				update_halt_gdb(target, debug_reason);

			/* semihosting may service the request and resume the core;
			 * in that case skip the halt event callbacks below */
			if (arm_semihosting(target, &retval) != 0)
				return retval;

			switch (prev_target_state) {
			case TARGET_RUNNING:
			case TARGET_UNKNOWN:
			case TARGET_RESET:
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
				break;
			case TARGET_DEBUG_RUNNING:
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
				break;
			default:
				break;
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
561
562 static int aarch64_halt(struct target *target)
563 {
564 struct armv8_common *armv8 = target_to_armv8(target);
565 armv8->last_run_control_op = ARMV8_RUNCONTROL_HALT;
566
567 if (target->smp)
568 return aarch64_halt_smp(target, false);
569
570 return aarch64_halt_one(target, HALT_SYNC);
571 }
572
/*
 * Restore processor context on a single PE before leaving debug state:
 * fix up the resume PC, write SCTLR back, then flush dirty registers.
 *
 * @param current            1: resume at current pc, 0: resume at *address
 * @param address            in: resume address (when !current);
 *                           out: the pc actually used (when current)
 * @param handle_breakpoints forwarded to aarch64_restore_context() as bpwp
 * @param debug_execution    when 0, all working areas are freed first
 */
static int aarch64_restore_one(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	LOG_DEBUG("%s", target_name(target));

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		resume_pc &= 0xFFFFFFFC;	/* A32: force 4-byte alignment */
		break;
	case ARM_STATE_AARCH64:
		resume_pc &= 0xFFFFFFFFFFFFFFFC;	/* A64: force 4-byte alignment */
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = true;
	arm->pc->valid = true;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
627
/**
 * prepare single target for restart
 *
 * Sanity-checks DSCR (ITE set, ERR clear), acknowledges the pending CTI
 * halt event, opens the CTI gate for restart events (channel 1) while
 * closing it for halt events (channel 0), sets DSCR.HDE and clears the
 * sticky PRSR bits by reading the register.
 */
static int aarch64_prepare_restart_one(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dscr;
	uint32_t tmp;

	LOG_DEBUG("%s", target_name(target));

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* diagnostics only: these conditions are reported but not fatal */
	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* acknowledge a pending CTI halt event */
	retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs. Also close gate for channel 0
	 * to isolate the PE from halt events.
	 */
	if (retval == ERROR_OK)
		retval = arm_cti_ungate_channel(armv8->cti, 1);
	if (retval == ERROR_OK)
		retval = arm_cti_gate_channel(armv8->cti, 0);

	/* make sure that DSCR.HDE is set */
	if (retval == ERROR_OK) {
		dscr |= DSCR_HDE;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval == ERROR_OK) {
		/* clear sticky bits in PRSR, SDR is now 0 */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
	}

	return retval;
}
679
680 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
681 {
682 struct armv8_common *armv8 = target_to_armv8(target);
683 int retval;
684
685 LOG_DEBUG("%s", target_name(target));
686
687 /* trigger an event on channel 1, generates a restart request to the PE */
688 retval = arm_cti_pulse_channel(armv8->cti, 1);
689 if (retval != ERROR_OK)
690 return retval;
691
692 if (mode == RESTART_SYNC) {
693 int64_t then = timeval_ms();
694 for (;;) {
695 int resumed;
696 /*
697 * if PRSR.SDR is set now, the target did restart, even
698 * if it's now already halted again (e.g. due to breakpoint)
699 */
700 retval = aarch64_check_state_one(target,
701 PRSR_SDR, PRSR_SDR, &resumed, NULL);
702 if (retval != ERROR_OK || resumed)
703 break;
704
705 if (timeval_ms() > then + 1000) {
706 LOG_ERROR("%s: Timeout waiting for resume"PRIx32, target_name(target));
707 retval = ERROR_TARGET_TIMEOUT;
708 break;
709 }
710 }
711 }
712
713 if (retval != ERROR_OK)
714 return retval;
715
716 target->debug_reason = DBG_REASON_NOTHALTED;
717 target->state = TARGET_RUNNING;
718
719 return ERROR_OK;
720 }
721
722 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
723 {
724 int retval;
725
726 LOG_DEBUG("%s", target_name(target));
727
728 retval = aarch64_prepare_restart_one(target);
729 if (retval == ERROR_OK)
730 retval = aarch64_do_restart_one(target, mode);
731
732 return retval;
733 }
734
735 /*
736 * prepare all but the current target for restart
737 */
738 static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
739 {
740 int retval = ERROR_OK;
741 struct target_list *head;
742 struct target *first = NULL;
743 uint64_t address;
744
745 foreach_smp_target(head, target->head) {
746 struct target *curr = head->target;
747
748 /* skip calling target */
749 if (curr == target)
750 continue;
751 if (!target_was_examined(curr))
752 continue;
753 if (curr->state != TARGET_HALTED)
754 continue;
755
756 /* resume at current address, not in step mode */
757 retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
758 if (retval == ERROR_OK)
759 retval = aarch64_prepare_restart_one(curr);
760 if (retval != ERROR_OK) {
761 LOG_ERROR("failed to restore target %s", target_name(curr));
762 break;
763 }
764 /* remember the first valid target in the group */
765 if (first == NULL)
766 first = curr;
767 }
768
769 if (p_first)
770 *p_first = first;
771
772 return retval;
773 }
774
775
/*
 * Restart all halted members of the SMP group except the calling one,
 * which is about to single-step. Restores and prepares every other PE,
 * restarts the first of them lazily, then waits up to 1 s until all
 * others report PRSR.SDR (i.e. actually left debug state).
 */
static int aarch64_step_restart_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prep_restart_smp(target, 0, &first);
	if (retval != ERROR_OK)
		return retval;

	/* one explicit restart is enough; the CTI fans it out via channel 1 */
	if (first != NULL)
		retval = aarch64_do_restart_one(first, RESTART_LAZY);
	if (retval != ERROR_OK) {
		LOG_DEBUG("error restarting target %s", target_name(first));
		return retval;
	}

	int64_t then = timeval_ms();
	for (;;) {
		struct target *curr = target;
		bool all_resumed = true;

		foreach_smp_target(head, target->head) {
			uint32_t prsr;
			int resumed;

			curr = head->target;

			if (curr == target)
				continue;

			if (!target_was_examined(curr))
				continue;

			/* resumed when SDR set; still halted when HALT set */
			retval = aarch64_check_state_one(curr,
					PRSR_SDR, PRSR_SDR, &resumed, &prsr);
			if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
				all_resumed = false;
				break;
			}

			if (curr->state != TARGET_RUNNING) {
				curr->state = TARGET_RUNNING;
				curr->debug_reason = DBG_REASON_NOTHALTED;
				target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
			}
		}

		if (all_resumed)
			break;

		if (timeval_ms() > then + 1000) {
			LOG_ERROR("%s: timeout waiting for target resume", __func__);
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to restart one core in
		 * each cluster explicitly. So if we find that a core has not
		 * resumed yet, we trigger an explicit resume for the second
		 * cluster.
		 */
		retval = aarch64_do_restart_one(curr, RESTART_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
848
/*
 * Target-API resume entry point.
 *
 * For an SMP group, first prepares all other members (context restore +
 * CTI restart gating), then restores and synchronously restarts the
 * calling target, and finally waits up to 1 s for the rest of the group
 * to report PRSR.SDR. Fires RESUMED / DEBUG_RESUMED event callbacks.
 *
 * @param current            1: resume at current pc, 0: resume at address
 * @param address            resume address when !current
 * @param handle_breakpoints forwarded to context restore
 * @param debug_execution    selects TARGET_DEBUG_RUNNING vs TARGET_RUNNING
 */
static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	struct armv8_common *armv8 = target_to_armv8(target);
	armv8->last_run_control_op = ARMV8_RUNCONTROL_RESUME;

	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	/*
	 * If this target is part of a SMP group, prepare the others
	 * targets for resuming. This involves restoring the complete
	 * target register context and setting up CTI gates to accept
	 * resume events from the trigger matrix.
	 */
	if (target->smp) {
		retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
		if (retval != ERROR_OK)
			return retval;
	}

	/* all targets prepared, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
				 debug_execution);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_SYNC);
	if (retval != ERROR_OK)
		return retval;

	if (target->smp) {
		int64_t then = timeval_ms();
		for (;;) {
			struct target *curr = target;
			struct target_list *head;
			bool all_resumed = true;

			foreach_smp_target(head, target->head) {
				uint32_t prsr;
				int resumed;

				curr = head->target;
				if (curr == target)
					continue;
				if (!target_was_examined(curr))
					continue;

				/* resumed when SDR set; still halted when HALT set */
				retval = aarch64_check_state_one(curr,
						PRSR_SDR, PRSR_SDR, &resumed, &prsr);
				if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
					all_resumed = false;
					break;
				}

				if (curr->state != TARGET_RUNNING) {
					curr->state = TARGET_RUNNING;
					curr->debug_reason = DBG_REASON_NOTHALTED;
					target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
				}
			}

			if (all_resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}

			/*
			 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
			 * and it looks like the CTI's are not connected by a common
			 * trigger matrix. It seems that we need to halt one core in each
			 * cluster explicitly. So if we find that a core has not halted
			 * yet, we trigger an explicit resume for the second cluster.
			 */
			retval = aarch64_do_restart_one(curr, RESTART_LAZY);
			if (retval != ERROR_OK)
				break;
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}
951
/*
 * Enter debug state bookkeeping after the PE halted: clear sticky
 * errors, read DSCR, acknowledge the CTI halt event, select the opcode
 * and register-access tables for the current core state, determine the
 * debug reason, capture WFAR for watchpoints and read back the full
 * register set.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	uint32_t dscr;

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval == ERROR_OK)
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);

	dpm->dscr = dscr;
	/* pick AArch64 or AArch32 opcode/register access tables */
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* close the CTI gate for all events */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		/* WFAR is 64 bit, read high then low word */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR1,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
1019
/*
 * Post-debug-entry hook: read SCTLR for the current exception level
 * (switching EL0 -> EL1h first, since SCTLR_EL1 is not accessible from
 * EL0), cache its value, identify the caches on first entry, and update
 * the MMU/cache enable flags from the SCTLR M, C and I bits.
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* SCTLR_EL1 is not accessible from EL0; switch to EL1h first */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_SYS:
		/* AArch32 modes: SCTLR lives in cp15 c1 */
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
				armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* identify caches and read MPIDR once per connection */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	/* SCTLR.M (bit 0), SCTLR.C (bit 2), SCTLR.I (bit 12) */
	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
		(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
		(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	return ERROR_OK;
}
1086
1087 /*
1088 * single-step a target
1089 */
/*
 * Single-step the target using the EDECR.SS halting-step mechanism.
 *
 * current != 0 means "step from the current PC"; otherwise resume from
 * 'address'. In an SMP group the stepped core is isolated on the CTI so
 * the other cores can be resumed and keep running during the step.
 * Returns ERROR_OK, a transport error, or ERROR_TARGET_TIMEOUT/halt
 * fallback result (see WFI note below).
 */
static int aarch64_step(struct target *target, int current, target_addr_t address,
	int handle_breakpoints)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	int saved_retval = ERROR_OK;
	int retval;
	uint32_t edecr;

	armv8->last_run_control_op = ARMV8_RUNCONTROL_STEP;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
	/* make sure EDECR.SS is not set when restoring the register */

	if (retval == ERROR_OK) {
		/* 'edecr' (bit 2 cleared) is kept for the restore at the end */
		edecr &= ~0x4;
		/* set EDECR.SS to enter hardware step mode */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
	}
	/* disable interrupts while stepping (DSCR bits 22-23, INTdis) */
	if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
		retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
	/* bail out if stepping setup has failed */
	if (retval != ERROR_OK)
		return retval;

	if (target->smp && (current == 1)) {
		/*
		 * isolate current target so that it doesn't get resumed
		 * together with the others
		 */
		retval = arm_cti_gate_channel(armv8->cti, 1);
		/* resume all other targets in the group */
		if (retval == ERROR_OK)
			retval = aarch64_step_restart_smp(target);
		if (retval != ERROR_OK) {
			LOG_ERROR("Failed to restart non-stepping targets in SMP group");
			return retval;
		}
		LOG_DEBUG("Restarted all non-stepping targets in SMP group");
	}

	/* all other targets running, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &address, 0, 0);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_LAZY);

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
	if (!handle_breakpoints)
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* poll PRSR until the core re-enters debug state or 100 ms elapse */
	int64_t then = timeval_ms();
	for (;;) {
		int stepped;
		uint32_t prsr;

		retval = aarch64_check_state_one(target,
					PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
		if (retval != ERROR_OK || stepped)
			break;

		if (timeval_ms() > then + 100) {
			LOG_ERROR("timeout waiting for target %s halt after step",
					target_name(target));
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
	}

	/*
	 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
	 * causes a timeout. The core takes the step but doesn't complete it and so
	 * debug state is never entered. However, you can manually halt the core
	 * as an external debug event is also a WFI wakeup event.
	 */
	if (retval == ERROR_TARGET_TIMEOUT)
		saved_retval = aarch64_halt_one(target, HALT_SYNC);

	/* restore EDECR (with SS cleared, see above) */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, edecr);
	if (retval != ERROR_OK)
		return retval;

	/* restore interrupts */
	if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
		retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
		if (retval != ERROR_OK)
			return ERROR_OK;
	}

	/* report the deferred halt-fallback error, if any */
	if (saved_retval != ERROR_OK)
		return saved_retval;

	return ERROR_OK;
}
1196
1197 static int aarch64_restore_context(struct target *target, bool bpwp)
1198 {
1199 struct armv8_common *armv8 = target_to_armv8(target);
1200 struct arm *arm = &armv8->arm;
1201
1202 int retval;
1203
1204 LOG_DEBUG("%s", target_name(target));
1205
1206 if (armv8->pre_restore_context)
1207 armv8->pre_restore_context(target);
1208
1209 retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1210 if (retval == ERROR_OK) {
1211 /* registers are now invalid */
1212 register_cache_invalidate(arm->core_cache);
1213 register_cache_invalidate(arm->core_cache->next);
1214 }
1215
1216 return retval;
1217 }
1218
1219 /*
1220 * Cortex-A8 Breakpoint and watchpoint functions
1221 */
1222
1223 /* Setup hardware Breakpoint Register Pair */
1224 static int aarch64_set_breakpoint(struct target *target,
1225 struct breakpoint *breakpoint, uint8_t matchmode)
1226 {
1227 int retval;
1228 int brp_i = 0;
1229 uint32_t control;
1230 uint8_t byte_addr_select = 0x0F;
1231 struct aarch64_common *aarch64 = target_to_aarch64(target);
1232 struct armv8_common *armv8 = &aarch64->armv8_common;
1233 struct aarch64_brp *brp_list = aarch64->brp_list;
1234
1235 if (breakpoint->set) {
1236 LOG_WARNING("breakpoint already set");
1237 return ERROR_OK;
1238 }
1239
1240 if (breakpoint->type == BKPT_HARD) {
1241 int64_t bpt_value;
1242 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
1243 brp_i++;
1244 if (brp_i >= aarch64->brp_num) {
1245 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1246 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1247 }
1248 breakpoint->set = brp_i + 1;
1249 if (breakpoint->length == 2)
1250 byte_addr_select = (3 << (breakpoint->address & 0x02));
1251 control = ((matchmode & 0x7) << 20)
1252 | (1 << 13)
1253 | (byte_addr_select << 5)
1254 | (3 << 1) | 1;
1255 brp_list[brp_i].used = 1;
1256 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1257 brp_list[brp_i].control = control;
1258 bpt_value = brp_list[brp_i].value;
1259
1260 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1261 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1262 (uint32_t)(bpt_value & 0xFFFFFFFF));
1263 if (retval != ERROR_OK)
1264 return retval;
1265 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1266 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1267 (uint32_t)(bpt_value >> 32));
1268 if (retval != ERROR_OK)
1269 return retval;
1270
1271 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1272 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1273 brp_list[brp_i].control);
1274 if (retval != ERROR_OK)
1275 return retval;
1276 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1277 brp_list[brp_i].control,
1278 brp_list[brp_i].value);
1279
1280 } else if (breakpoint->type == BKPT_SOFT) {
1281 uint32_t opcode;
1282 uint8_t code[4];
1283
1284 if (armv8_dpm_get_core_state(&armv8->dpm) == ARM_STATE_AARCH64) {
1285 opcode = ARMV8_HLT(11);
1286
1287 if (breakpoint->length != 4)
1288 LOG_ERROR("bug: breakpoint length should be 4 in AArch64 mode");
1289 } else {
1290 /**
1291 * core_state is ARM_STATE_ARM
1292 * in that case the opcode depends on breakpoint length:
1293 * - if length == 4 => A32 opcode
1294 * - if length == 2 => T32 opcode
1295 * - if length == 3 => T32 opcode (refer to gdb doc : ARM-Breakpoint-Kinds)
1296 * in that case the length should be changed from 3 to 4 bytes
1297 **/
1298 opcode = (breakpoint->length == 4) ? ARMV8_HLT_A1(11) :
1299 (uint32_t) (ARMV8_HLT_T1(11) | ARMV8_HLT_T1(11) << 16);
1300
1301 if (breakpoint->length == 3)
1302 breakpoint->length = 4;
1303 }
1304
1305 buf_set_u32(code, 0, 32, opcode);
1306
1307 retval = target_read_memory(target,
1308 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1309 breakpoint->length, 1,
1310 breakpoint->orig_instr);
1311 if (retval != ERROR_OK)
1312 return retval;
1313
1314 armv8_cache_d_inner_flush_virt(armv8,
1315 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1316 breakpoint->length);
1317
1318 retval = target_write_memory(target,
1319 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1320 breakpoint->length, 1, code);
1321 if (retval != ERROR_OK)
1322 return retval;
1323
1324 armv8_cache_d_inner_flush_virt(armv8,
1325 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1326 breakpoint->length);
1327
1328 armv8_cache_i_inner_inval_virt(armv8,
1329 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1330 breakpoint->length);
1331
1332 breakpoint->set = 0x11; /* Any nice value but 0 */
1333 }
1334
1335 /* Ensure that halting debug mode is enable */
1336 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1337 if (retval != ERROR_OK) {
1338 LOG_DEBUG("Failed to set DSCR.HDE");
1339 return retval;
1340 }
1341
1342 return ERROR_OK;
1343 }
1344
1345 static int aarch64_set_context_breakpoint(struct target *target,
1346 struct breakpoint *breakpoint, uint8_t matchmode)
1347 {
1348 int retval = ERROR_FAIL;
1349 int brp_i = 0;
1350 uint32_t control;
1351 uint8_t byte_addr_select = 0x0F;
1352 struct aarch64_common *aarch64 = target_to_aarch64(target);
1353 struct armv8_common *armv8 = &aarch64->armv8_common;
1354 struct aarch64_brp *brp_list = aarch64->brp_list;
1355
1356 if (breakpoint->set) {
1357 LOG_WARNING("breakpoint already set");
1358 return retval;
1359 }
1360 /*check available context BRPs*/
1361 while ((brp_list[brp_i].used ||
1362 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
1363 brp_i++;
1364
1365 if (brp_i >= aarch64->brp_num) {
1366 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1367 return ERROR_FAIL;
1368 }
1369
1370 breakpoint->set = brp_i + 1;
1371 control = ((matchmode & 0x7) << 20)
1372 | (1 << 13)
1373 | (byte_addr_select << 5)
1374 | (3 << 1) | 1;
1375 brp_list[brp_i].used = 1;
1376 brp_list[brp_i].value = (breakpoint->asid);
1377 brp_list[brp_i].control = control;
1378 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1379 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1380 brp_list[brp_i].value);
1381 if (retval != ERROR_OK)
1382 return retval;
1383 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1384 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1385 brp_list[brp_i].control);
1386 if (retval != ERROR_OK)
1387 return retval;
1388 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1389 brp_list[brp_i].control,
1390 brp_list[brp_i].value);
1391 return ERROR_OK;
1392
1393 }
1394
1395 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1396 {
1397 int retval = ERROR_FAIL;
1398 int brp_1 = 0; /* holds the contextID pair */
1399 int brp_2 = 0; /* holds the IVA pair */
1400 uint32_t control_CTX, control_IVA;
1401 uint8_t CTX_byte_addr_select = 0x0F;
1402 uint8_t IVA_byte_addr_select = 0x0F;
1403 uint8_t CTX_machmode = 0x03;
1404 uint8_t IVA_machmode = 0x01;
1405 struct aarch64_common *aarch64 = target_to_aarch64(target);
1406 struct armv8_common *armv8 = &aarch64->armv8_common;
1407 struct aarch64_brp *brp_list = aarch64->brp_list;
1408
1409 if (breakpoint->set) {
1410 LOG_WARNING("breakpoint already set");
1411 return retval;
1412 }
1413 /*check available context BRPs*/
1414 while ((brp_list[brp_1].used ||
1415 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1416 brp_1++;
1417
1418 printf("brp(CTX) found num: %d\n", brp_1);
1419 if (brp_1 >= aarch64->brp_num) {
1420 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1421 return ERROR_FAIL;
1422 }
1423
1424 while ((brp_list[brp_2].used ||
1425 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1426 brp_2++;
1427
1428 printf("brp(IVA) found num: %d\n", brp_2);
1429 if (brp_2 >= aarch64->brp_num) {
1430 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1431 return ERROR_FAIL;
1432 }
1433
1434 breakpoint->set = brp_1 + 1;
1435 breakpoint->linked_BRP = brp_2;
1436 control_CTX = ((CTX_machmode & 0x7) << 20)
1437 | (brp_2 << 16)
1438 | (0 << 14)
1439 | (CTX_byte_addr_select << 5)
1440 | (3 << 1) | 1;
1441 brp_list[brp_1].used = 1;
1442 brp_list[brp_1].value = (breakpoint->asid);
1443 brp_list[brp_1].control = control_CTX;
1444 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1445 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1446 brp_list[brp_1].value);
1447 if (retval != ERROR_OK)
1448 return retval;
1449 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1450 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1451 brp_list[brp_1].control);
1452 if (retval != ERROR_OK)
1453 return retval;
1454
1455 control_IVA = ((IVA_machmode & 0x7) << 20)
1456 | (brp_1 << 16)
1457 | (1 << 13)
1458 | (IVA_byte_addr_select << 5)
1459 | (3 << 1) | 1;
1460 brp_list[brp_2].used = 1;
1461 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1462 brp_list[brp_2].control = control_IVA;
1463 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1464 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1465 brp_list[brp_2].value & 0xFFFFFFFF);
1466 if (retval != ERROR_OK)
1467 return retval;
1468 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1469 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1470 brp_list[brp_2].value >> 32);
1471 if (retval != ERROR_OK)
1472 return retval;
1473 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1474 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1475 brp_list[brp_2].control);
1476 if (retval != ERROR_OK)
1477 return retval;
1478
1479 return ERROR_OK;
1480 }
1481
/*
 * Remove a previously installed breakpoint from the target.
 *
 * Hard breakpoints: clear and disable the owning BRP(s); a hybrid
 * breakpoint (address != 0 AND asid != 0) releases both the context
 * pair and the linked IVA pair. Soft breakpoints: write back the saved
 * original instruction and restore cache coherency.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			/* hybrid breakpoint: release both linked BRPs */
			int brp_i = breakpoint->set - 1;	/* 'set' is 1-based */
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* disable via BCR first, then zero both BVR words */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			/* NOTE(review): high word written from the low 32 bits of
			 * 'value'; harmless here since value was just zeroed */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: release the single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* flush D-cache and invalidate I-cache over the restored bytes */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1614
1615 static int aarch64_add_breakpoint(struct target *target,
1616 struct breakpoint *breakpoint)
1617 {
1618 struct aarch64_common *aarch64 = target_to_aarch64(target);
1619
1620 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1621 LOG_INFO("no hardware breakpoint available");
1622 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1623 }
1624
1625 if (breakpoint->type == BKPT_HARD)
1626 aarch64->brp_num_available--;
1627
1628 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1629 }
1630
1631 static int aarch64_add_context_breakpoint(struct target *target,
1632 struct breakpoint *breakpoint)
1633 {
1634 struct aarch64_common *aarch64 = target_to_aarch64(target);
1635
1636 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1637 LOG_INFO("no hardware breakpoint available");
1638 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1639 }
1640
1641 if (breakpoint->type == BKPT_HARD)
1642 aarch64->brp_num_available--;
1643
1644 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1645 }
1646
1647 static int aarch64_add_hybrid_breakpoint(struct target *target,
1648 struct breakpoint *breakpoint)
1649 {
1650 struct aarch64_common *aarch64 = target_to_aarch64(target);
1651
1652 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1653 LOG_INFO("no hardware breakpoint available");
1654 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1655 }
1656
1657 if (breakpoint->type == BKPT_HARD)
1658 aarch64->brp_num_available--;
1659
1660 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1661 }
1662
1663 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1664 {
1665 struct aarch64_common *aarch64 = target_to_aarch64(target);
1666
1667 #if 0
1668 /* It is perfectly possible to remove breakpoints while the target is running */
1669 if (target->state != TARGET_HALTED) {
1670 LOG_WARNING("target not halted");
1671 return ERROR_TARGET_NOT_HALTED;
1672 }
1673 #endif
1674
1675 if (breakpoint->set) {
1676 aarch64_unset_breakpoint(target, breakpoint);
1677 if (breakpoint->type == BKPT_HARD)
1678 aarch64->brp_num_available++;
1679 }
1680
1681 return ERROR_OK;
1682 }
1683
1684 /* Setup hardware Watchpoint Register Pair */
1685 static int aarch64_set_watchpoint(struct target *target,
1686 struct watchpoint *watchpoint)
1687 {
1688 int retval;
1689 int wp_i = 0;
1690 uint32_t control, offset, length;
1691 struct aarch64_common *aarch64 = target_to_aarch64(target);
1692 struct armv8_common *armv8 = &aarch64->armv8_common;
1693 struct aarch64_brp *wp_list = aarch64->wp_list;
1694
1695 if (watchpoint->set) {
1696 LOG_WARNING("watchpoint already set");
1697 return ERROR_OK;
1698 }
1699
1700 while (wp_list[wp_i].used && (wp_i < aarch64->wp_num))
1701 wp_i++;
1702 if (wp_i >= aarch64->wp_num) {
1703 LOG_ERROR("ERROR Can not find free Watchpoint Register Pair");
1704 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1705 }
1706
1707 control = (1 << 0) /* enable */
1708 | (3 << 1) /* both user and privileged access */
1709 | (1 << 13); /* higher mode control */
1710
1711 switch (watchpoint->rw) {
1712 case WPT_READ:
1713 control |= 1 << 3;
1714 break;
1715 case WPT_WRITE:
1716 control |= 2 << 3;
1717 break;
1718 case WPT_ACCESS:
1719 control |= 3 << 3;
1720 break;
1721 }
1722
1723 /* Match up to 8 bytes. */
1724 offset = watchpoint->address & 7;
1725 length = watchpoint->length;
1726 if (offset + length > sizeof(uint64_t)) {
1727 length = sizeof(uint64_t) - offset;
1728 LOG_WARNING("Adjust watchpoint match inside 8-byte boundary");
1729 }
1730 for (; length > 0; offset++, length--)
1731 control |= (1 << offset) << 5;
1732
1733 wp_list[wp_i].value = watchpoint->address & 0xFFFFFFFFFFFFFFF8ULL;
1734 wp_list[wp_i].control = control;
1735
1736 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1737 + CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].BRPn,
1738 (uint32_t)(wp_list[wp_i].value & 0xFFFFFFFF));
1739 if (retval != ERROR_OK)
1740 return retval;
1741 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1742 + CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].BRPn,
1743 (uint32_t)(wp_list[wp_i].value >> 32));
1744 if (retval != ERROR_OK)
1745 return retval;
1746
1747 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1748 + CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].BRPn,
1749 control);
1750 if (retval != ERROR_OK)
1751 return retval;
1752 LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, wp_i,
1753 wp_list[wp_i].control, wp_list[wp_i].value);
1754
1755 /* Ensure that halting debug mode is enable */
1756 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1757 if (retval != ERROR_OK) {
1758 LOG_DEBUG("Failed to set DSCR.HDE");
1759 return retval;
1760 }
1761
1762 wp_list[wp_i].used = 1;
1763 watchpoint->set = wp_i + 1;
1764
1765 return ERROR_OK;
1766 }
1767
1768 /* Clear hardware Watchpoint Register Pair */
/* Clear hardware Watchpoint Register Pair */
/*
 * Disable and zero the WP pair owning this watchpoint: WCR is cleared
 * first (disabling the match), then both WVR words are zeroed.
 */
static int aarch64_unset_watchpoint(struct target *target,
	struct watchpoint *watchpoint)
{
	int retval, wp_i;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *wp_list = aarch64->wp_list;

	if (!watchpoint->set) {
		LOG_WARNING("watchpoint not set");
		return ERROR_OK;
	}

	wp_i = watchpoint->set - 1;	/* 'set' is 1-based */
	if ((wp_i < 0) || (wp_i >= aarch64->wp_num)) {
		LOG_DEBUG("Invalid WP number in watchpoint");
		return ERROR_OK;
	}
	LOG_DEBUG("rwp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, wp_i,
		wp_list[wp_i].control, wp_list[wp_i].value);
	wp_list[wp_i].used = 0;
	wp_list[wp_i].value = 0;
	wp_list[wp_i].control = 0;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].BRPn,
			wp_list[wp_i].control);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].BRPn,
			wp_list[wp_i].value);
	if (retval != ERROR_OK)
		return retval;

	/* NOTE(review): high word written from the low 32 bits of 'value';
	 * harmless here since value was just zeroed */
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].BRPn,
			(uint32_t)wp_list[wp_i].value);
	if (retval != ERROR_OK)
		return retval;
	watchpoint->set = 0;

	return ERROR_OK;
}
1812
1813 static int aarch64_add_watchpoint(struct target *target,
1814 struct watchpoint *watchpoint)
1815 {
1816 int retval;
1817 struct aarch64_common *aarch64 = target_to_aarch64(target);
1818
1819 if (aarch64->wp_num_available < 1) {
1820 LOG_INFO("no hardware watchpoint available");
1821 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1822 }
1823
1824 retval = aarch64_set_watchpoint(target, watchpoint);
1825 if (retval == ERROR_OK)
1826 aarch64->wp_num_available--;
1827
1828 return retval;
1829 }
1830
1831 static int aarch64_remove_watchpoint(struct target *target,
1832 struct watchpoint *watchpoint)
1833 {
1834 struct aarch64_common *aarch64 = target_to_aarch64(target);
1835
1836 if (watchpoint->set) {
1837 aarch64_unset_watchpoint(target, watchpoint);
1838 aarch64->wp_num_available++;
1839 }
1840
1841 return ERROR_OK;
1842 }
1843
1844 /**
1845 * find out which watchpoint hits
1846 * get exception address and compare the address to watchpoints
1847 */
1848 int aarch64_hit_watchpoint(struct target *target,
1849 struct watchpoint **hit_watchpoint)
1850 {
1851 if (target->debug_reason != DBG_REASON_WATCHPOINT)
1852 return ERROR_FAIL;
1853
1854 struct armv8_common *armv8 = target_to_armv8(target);
1855
1856 uint64_t exception_address;
1857 struct watchpoint *wp;
1858
1859 exception_address = armv8->dpm.wp_pc;
1860
1861 if (exception_address == 0xFFFFFFFF)
1862 return ERROR_FAIL;
1863
1864 /**********************************************************/
1865 /* see if a watchpoint address matches a value read from */
1866 /* the EDWAR register. Testing shows that on some ARM CPUs*/
1867 /* the EDWAR value needs to have 8 added to it so we add */
1868 /* that check as well not sure if that is a core bug) */
1869 /**********************************************************/
1870 for (exception_address = armv8->dpm.wp_pc; exception_address <= (armv8->dpm.wp_pc + 8);
1871 exception_address += 8) {
1872 for (wp = target->watchpoints; wp; wp = wp->next) {
1873 if ((exception_address >= wp->address) && (exception_address < (wp->address + wp->length))) {
1874 *hit_watchpoint = wp;
1875 if (exception_address != armv8->dpm.wp_pc)
1876 LOG_DEBUG("watchpoint hit required EDWAR to be increased by 8");
1877 return ERROR_OK;
1878 }
1879 }
1880 }
1881
1882 return ERROR_FAIL;
1883 }
1884
1885 /*
1886 * Cortex-A8 Reset functions
1887 */
1888
1889 static int aarch64_enable_reset_catch(struct target *target, bool enable)
1890 {
1891 struct armv8_common *armv8 = target_to_armv8(target);
1892 uint32_t edecr;
1893 int retval;
1894
1895 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1896 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1897 LOG_DEBUG("EDECR = 0x%08" PRIx32 ", enable=%d", edecr, enable);
1898 if (retval != ERROR_OK)
1899 return retval;
1900
1901 if (enable)
1902 edecr |= ECR_RCE;
1903 else
1904 edecr &= ~ECR_RCE;
1905
1906 return mem_ap_write_atomic_u32(armv8->debug_ap,
1907 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1908 }
1909
1910 static int aarch64_clear_reset_catch(struct target *target)
1911 {
1912 struct armv8_common *armv8 = target_to_armv8(target);
1913 uint32_t edesr;
1914 int retval;
1915 bool was_triggered;
1916
1917 /* check if Reset Catch debug event triggered as expected */
1918 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1919 armv8->debug_base + CPUV8_DBG_EDESR, &edesr);
1920 if (retval != ERROR_OK)
1921 return retval;
1922
1923 was_triggered = !!(edesr & ESR_RC);
1924 LOG_DEBUG("Reset Catch debug event %s",
1925 was_triggered ? "triggered" : "NOT triggered!");
1926
1927 if (was_triggered) {
1928 /* clear pending Reset Catch debug event */
1929 edesr &= ~ESR_RC;
1930 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1931 armv8->debug_base + CPUV8_DBG_EDESR, edesr);
1932 if (retval != ERROR_OK)
1933 return retval;
1934 }
1935
1936 return ERROR_OK;
1937 }
1938
/*
 * Assert reset on the target.
 *
 * Prefers a user-supplied RESET_ASSERT event handler; otherwise drives
 * SRST. With reset_halt requested on an examined target, a Reset Catch
 * debug event is armed so the core halts right after reset; when SRST
 * does not gate debug logic, SRST is asserted first and sticky errors
 * are cleared before arming the catch. Both register caches are
 * invalidated and the target state becomes TARGET_RESET.
 */
static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	enum reset_types reset_config = jtag_get_reset_config();
	int retval;

	LOG_DEBUG(" ");

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (reset_config & RESET_HAS_SRST) {
		bool srst_asserted = false;

		if (target->reset_halt) {
			if (target_was_examined(target)) {

				if (reset_config & RESET_SRST_NO_GATING) {
					/*
					 * SRST needs to be asserted *before* Reset Catch
					 * debug event can be set up.
					 */
					adapter_assert_reset();
					srst_asserted = true;

					/* make sure to clear all sticky errors */
					mem_ap_write_atomic_u32(armv8->debug_ap,
						armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
				}

				/* set up Reset Catch debug event to halt the CPU after reset */
				retval = aarch64_enable_reset_catch(target, true);
				if (retval != ERROR_OK)
					LOG_WARNING("%s: Error enabling Reset Catch debug event; the CPU will not halt immediately after reset!",
						target_name(target));
			} else {
				LOG_WARNING("%s: Target not examined, will not halt immediately after reset!",
					target_name(target));
			}
		}

		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		if (!srst_asserted)
			adapter_assert_reset();
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target)) {
		register_cache_invalidate(armv8->arm.core_cache);
		register_cache_invalidate(armv8->arm.core_cache->next);
	}

	target->state = TARGET_RESET;

	return ERROR_OK;
}
2000
2001 static int aarch64_deassert_reset(struct target *target)
2002 {
2003 int retval;
2004
2005 LOG_DEBUG(" ");
2006
2007 /* be certain SRST is off */
2008 adapter_deassert_reset();
2009
2010 if (!target_was_examined(target))
2011 return ERROR_OK;
2012
2013 retval = aarch64_init_debug_access(target);
2014 if (retval != ERROR_OK)
2015 return retval;
2016
2017 retval = aarch64_poll(target);
2018 if (retval != ERROR_OK)
2019 return retval;
2020
2021 if (target->reset_halt) {
2022 /* clear pending Reset Catch debug event */
2023 retval = aarch64_clear_reset_catch(target);
2024 if (retval != ERROR_OK)
2025 LOG_WARNING("%s: Clearing Reset Catch debug event failed",
2026 target_name(target));
2027
2028 /* disable Reset Catch debug event */
2029 retval = aarch64_enable_reset_catch(target, false);
2030 if (retval != ERROR_OK)
2031 LOG_WARNING("%s: Disabling Reset Catch debug event failed",
2032 target_name(target));
2033
2034 if (target->state != TARGET_HALTED) {
2035 LOG_WARNING("%s: ran after reset and before halt ...",
2036 target_name(target));
2037 retval = target_halt(target);
2038 if (retval != ERROR_OK)
2039 return retval;
2040 }
2041 }
2042
2043 return ERROR_OK;
2044 }
2045
/*
 * Write memory through the CPU, one element at a time, via the DCC in
 * normal (non-memory) mode: each element is pushed into DTRRX, moved
 * into X1/R1 by an executed MRS/MRC, then stored with a post-indexed
 * STRB/STRH/STRW through the address register set up by the caller.
 *
 * size is 1, 2 or 4 bytes; dscr caches the current DSCR value and is
 * updated if the DCC mode is switched. R1/X1 is clobbered and marked
 * dirty so it gets restored later.
 */
static int aarch64_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

	/* X1/R1 is used as data scratch register below */
	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode if necessary */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t data, opcode;

		/* write the data to store into DTRRX (target endianness) */
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* move DTRRX into X1 (A64) or R1 (A32) on the core */
		if (arm->core_state == ARM_STATE_AARCH64)
			retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		/* store X1/R1 through the post-indexed address pointer */
		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2104
/* Write 'count' 32-bit words to the CPU's memory using the DCC in
 * memory-access mode (EDSCR.MA), per the DDI0487 J9.1 algorithm the
 * caller follows: every word pushed into DTRRX is stored by the core
 * through the address held in X0.  The caller must have loaded X0 with
 * a word-aligned target address beforehand.
 *
 * dscr: cached EDSCR value; updated as the DCC mode is toggled
 */
static int aarch64_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;

	/* X1/R1 is clobbered by the memory-mode transfer sequence */
	armv8_reg_current(arm, 1)->dirty = true;

	/* Step 1.d - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;


	/* Step 2.a - Do the write */
	retval = mem_ap_write_buf_noincr(armv8->debug_ap,
			buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.a - Switch DTR mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2137
/* Write memory through the APB-AP, using the halted core itself as the
 * transfer agent (algorithm from DDI0487A.g, chapter J9.1).
 *
 * X0 is loaded with the destination address; word-sized, word-aligned
 * transfers then go through the fast DCC memory-mode path, everything
 * else through the slow instruction-stuffing path.  Sticky DSCR abort
 * flags are checked before returning.
 */
static int aarch64_write_cpu_memory(struct target *target,
	uint64_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* write memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRX */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* fast path only handles aligned 32-bit transfers */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);

	if (retval != ERROR_OK) {
		/* Unset DTR mode so the core is left in a sane state */
		mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2224
2225 static int aarch64_read_cpu_memory_slow(struct target *target,
2226 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2227 {
2228 struct armv8_common *armv8 = target_to_armv8(target);
2229 struct arm_dpm *dpm = &armv8->dpm;
2230 struct arm *arm = &armv8->arm;
2231 int retval;
2232
2233 armv8_reg_current(arm, 1)->dirty = true;
2234
2235 /* change DCC to normal mode (if necessary) */
2236 if (*dscr & DSCR_MA) {
2237 *dscr &= DSCR_MA;
2238 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2239 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2240 if (retval != ERROR_OK)
2241 return retval;
2242 }
2243
2244 while (count) {
2245 uint32_t opcode, data;
2246
2247 if (size == 1)
2248 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
2249 else if (size == 2)
2250 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
2251 else
2252 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
2253 retval = dpm->instr_execute(dpm, opcode);
2254 if (retval != ERROR_OK)
2255 return retval;
2256
2257 if (arm->core_state == ARM_STATE_AARCH64)
2258 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
2259 else
2260 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
2261 if (retval != ERROR_OK)
2262 return retval;
2263
2264 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2265 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
2266 if (retval != ERROR_OK)
2267 return retval;
2268
2269 if (size == 1)
2270 *buffer = (uint8_t)data;
2271 else if (size == 2)
2272 target_buffer_set_u16(target, buffer, (uint16_t)data);
2273 else
2274 target_buffer_set_u32(target, buffer, data);
2275
2276 /* Advance */
2277 buffer += size;
2278 --count;
2279 }
2280
2281 return ERROR_OK;
2282 }
2283
/* Read 'count' 32-bit words from the CPU's memory using the DCC in
 * memory-access mode (EDSCR.MA): while MA is set, each external read
 * of DBGDTRTX makes the core load the next word from [X0] and advance
 * X0 by 4.  The last word is read only after leaving memory mode, so
 * no load is issued past the end of the requested range.  The caller
 * must have loaded X0 with a word-aligned source address.
 *
 * dscr: cached EDSCR value; updated as the DCC mode is toggled
 */
static int aarch64_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t value;

	/* Mark X1 as dirty */
	armv8_reg_current(arm, 1)->dirty = true;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
	} else {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
	}

	if (retval != ERROR_OK)
		return retval;

	/* Step 1.e - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 1.f - read DBGDTRTX and discard the value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	/* the final word is fetched separately in step 3.b below */
	count--;
	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	if (count) {
		/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
		 * increments X0 by 4. */
		retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
				armv8->debug_base + CPUV8_DBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Step 3.a - set DTR access mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	target_buffer_set_u32(target, buffer + count * 4, value);
	return retval;
}
2352
/* Read memory through the APB-AP, using the halted core itself as the
 * transfer agent (algorithm from DDI0487A.g, chapter J9.1).
 *
 * X0 is loaded with the source address; word-sized, word-aligned
 * transfers then go through the fast DCC memory-mode path, everything
 * else through the slow instruction-stuffing path.  Sticky DSCR abort
 * flags are checked before returning.
 */
static int aarch64_read_cpu_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
			address, size, count);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* fast path only handles aligned 32-bit transfers */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);

	/* make sure memory-access mode is off before the abort check,
	 * even if the transfer above failed */
	if (dscr & DSCR_MA) {
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval != ERROR_OK)
		return retval;

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2443
2444 static int aarch64_read_phys_memory(struct target *target,
2445 target_addr_t address, uint32_t size,
2446 uint32_t count, uint8_t *buffer)
2447 {
2448 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2449
2450 if (count && buffer) {
2451 /* read memory through APB-AP */
2452 retval = aarch64_mmu_modify(target, 0);
2453 if (retval != ERROR_OK)
2454 return retval;
2455 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2456 }
2457 return retval;
2458 }
2459
2460 static int aarch64_read_memory(struct target *target, target_addr_t address,
2461 uint32_t size, uint32_t count, uint8_t *buffer)
2462 {
2463 int mmu_enabled = 0;
2464 int retval;
2465
2466 /* determine if MMU was enabled on target stop */
2467 retval = aarch64_mmu(target, &mmu_enabled);
2468 if (retval != ERROR_OK)
2469 return retval;
2470
2471 if (mmu_enabled) {
2472 /* enable MMU as we could have disabled it for phys access */
2473 retval = aarch64_mmu_modify(target, 1);
2474 if (retval != ERROR_OK)
2475 return retval;
2476 }
2477 return aarch64_read_cpu_memory(target, address, size, count, buffer);
2478 }
2479
2480 static int aarch64_write_phys_memory(struct target *target,
2481 target_addr_t address, uint32_t size,
2482 uint32_t count, const uint8_t *buffer)
2483 {
2484 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2485
2486 if (count && buffer) {
2487 /* write memory through APB-AP */
2488 retval = aarch64_mmu_modify(target, 0);
2489 if (retval != ERROR_OK)
2490 return retval;
2491 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2492 }
2493
2494 return retval;
2495 }
2496
2497 static int aarch64_write_memory(struct target *target, target_addr_t address,
2498 uint32_t size, uint32_t count, const uint8_t *buffer)
2499 {
2500 int mmu_enabled = 0;
2501 int retval;
2502
2503 /* determine if MMU was enabled on target stop */
2504 retval = aarch64_mmu(target, &mmu_enabled);
2505 if (retval != ERROR_OK)
2506 return retval;
2507
2508 if (mmu_enabled) {
2509 /* enable MMU as we could have disabled it for phys access */
2510 retval = aarch64_mmu_modify(target, 1);
2511 if (retval != ERROR_OK)
2512 return retval;
2513 }
2514 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2515 }
2516
2517 static int aarch64_handle_target_request(void *priv)
2518 {
2519 struct target *target = priv;
2520 struct armv8_common *armv8 = target_to_armv8(target);
2521 int retval;
2522
2523 if (!target_was_examined(target))
2524 return ERROR_OK;
2525 if (!target->dbg_msg_enabled)
2526 return ERROR_OK;
2527
2528 if (target->state == TARGET_RUNNING) {
2529 uint32_t request;
2530 uint32_t dscr;
2531 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2532 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2533
2534 /* check if we have data */
2535 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2536 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2537 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2538 if (retval == ERROR_OK) {
2539 target_request(target, request);
2540 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2541 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2542 }
2543 }
2544 }
2545
2546 return ERROR_OK;
2547 }
2548
2549 static int aarch64_examine_first(struct target *target)
2550 {
2551 struct aarch64_common *aarch64 = target_to_aarch64(target);
2552 struct armv8_common *armv8 = &aarch64->armv8_common;
2553 struct adiv5_dap *swjdp = armv8->arm.dap;
2554 struct aarch64_private_config *pc = target->private_config;
2555 int i;
2556 int retval = ERROR_OK;
2557 uint64_t debug, ttypr;
2558 uint32_t cpuid;
2559 uint32_t tmp0, tmp1, tmp2, tmp3;
2560 debug = ttypr = cpuid = 0;
2561
2562 if (pc == NULL)
2563 return ERROR_FAIL;
2564
2565 if (pc->adiv5_config.ap_num == DP_APSEL_INVALID) {
2566 /* Search for the APB-AB */
2567 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2568 if (retval != ERROR_OK) {
2569 LOG_ERROR("Could not find APB-AP for debug access");
2570 return retval;
2571 }
2572 } else {
2573 armv8->debug_ap = dap_ap(swjdp, pc->adiv5_config.ap_num);
2574 }
2575
2576 retval = mem_ap_init(armv8->debug_ap);
2577 if (retval != ERROR_OK) {
2578 LOG_ERROR("Could not initialize the APB-AP");
2579 return retval;
2580 }
2581
2582 armv8->debug_ap->memaccess_tck = 10;
2583
2584 if (!target->dbgbase_set) {
2585 uint32_t dbgbase;
2586 /* Get ROM Table base */
2587 uint32_t apid;
2588 int32_t coreidx = target->coreid;
2589 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2590 if (retval != ERROR_OK)
2591 return retval;
2592 /* Lookup 0x15 -- Processor DAP */
2593 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2594 &armv8->debug_base, &coreidx);
2595 if (retval != ERROR_OK)
2596 return retval;
2597 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
2598 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2599 } else
2600 armv8->debug_base = target->dbgbase;
2601
2602 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2603 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2604 if (retval != ERROR_OK) {
2605 LOG_DEBUG("Examine %s failed", "oslock");
2606 return retval;
2607 }
2608
2609 retval = mem_ap_read_u32(armv8->debug_ap,
2610 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2611 if (retval != ERROR_OK) {
2612 LOG_DEBUG("Examine %s failed", "CPUID");
2613 return retval;
2614 }
2615
2616 retval = mem_ap_read_u32(armv8->debug_ap,
2617 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2618 retval += mem_ap_read_u32(armv8->debug_ap,
2619 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2620 if (retval != ERROR_OK) {
2621 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2622 return retval;
2623 }
2624 retval = mem_ap_read_u32(armv8->debug_ap,
2625 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2626 retval += mem_ap_read_u32(armv8->debug_ap,
2627 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2628 if (retval != ERROR_OK) {
2629 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2630 return retval;
2631 }
2632
2633 retval = dap_run(armv8->debug_ap->dap);
2634 if (retval != ERROR_OK) {
2635 LOG_ERROR("%s: examination failed\n", target_name(target));
2636 return retval;
2637 }
2638
2639 ttypr |= tmp1;
2640 ttypr = (ttypr << 32) | tmp0;
2641 debug |= tmp3;
2642 debug = (debug << 32) | tmp2;
2643
2644 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2645 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2646 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2647
2648 if (pc->cti == NULL)
2649 return ERROR_FAIL;
2650
2651 armv8->cti = pc->cti;
2652
2653 retval = aarch64_dpm_setup(aarch64, debug);
2654 if (retval != ERROR_OK)
2655 return retval;
2656
2657 /* Setup Breakpoint Register Pairs */
2658 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2659 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2660 aarch64->brp_num_available = aarch64->brp_num;
2661 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2662 for (i = 0; i < aarch64->brp_num; i++) {
2663 aarch64->brp_list[i].used = 0;
2664 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2665 aarch64->brp_list[i].type = BRP_NORMAL;
2666 else
2667 aarch64->brp_list[i].type = BRP_CONTEXT;
2668 aarch64->brp_list[i].value = 0;
2669 aarch64->brp_list[i].control = 0;
2670 aarch64->brp_list[i].BRPn = i;
2671 }
2672
2673 /* Setup Watchpoint Register Pairs */
2674 aarch64->wp_num = (uint32_t)((debug >> 20) & 0x0F) + 1;
2675 aarch64->wp_num_available = aarch64->wp_num;
2676 aarch64->wp_list = calloc(aarch64->wp_num, sizeof(struct aarch64_brp));
2677 for (i = 0; i < aarch64->wp_num; i++) {
2678 aarch64->wp_list[i].used = 0;
2679 aarch64->wp_list[i].type = BRP_NORMAL;
2680 aarch64->wp_list[i].value = 0;
2681 aarch64->wp_list[i].control = 0;
2682 aarch64->wp_list[i].BRPn = i;
2683 }
2684
2685 LOG_DEBUG("Configured %i hw breakpoints, %i watchpoints",
2686 aarch64->brp_num, aarch64->wp_num);
2687
2688 target->state = TARGET_UNKNOWN;
2689 target->debug_reason = DBG_REASON_NOTHALTED;
2690 aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2691 target_set_examined(target);
2692 return ERROR_OK;
2693 }
2694
2695 static int aarch64_examine(struct target *target)
2696 {
2697 int retval = ERROR_OK;
2698
2699 /* don't re-probe hardware after each reset */
2700 if (!target_was_examined(target))
2701 retval = aarch64_examine_first(target);
2702
2703 /* Configure core debug access */
2704 if (retval == ERROR_OK)
2705 retval = aarch64_init_debug_access(target);
2706
2707 return retval;
2708 }
2709
/*
 * AArch64 target creation and initialization
 */
2713
2714 static int aarch64_init_target(struct command_context *cmd_ctx,
2715 struct target *target)
2716 {
2717 /* examine_first() does a bunch of this */
2718 arm_semihosting_init(target);
2719 return ERROR_OK;
2720 }
2721
2722 static int aarch64_init_arch_info(struct target *target,
2723 struct aarch64_common *aarch64, struct adiv5_dap *dap)
2724 {
2725 struct armv8_common *armv8 = &aarch64->armv8_common;
2726
2727 /* Setup struct aarch64_common */
2728 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2729 armv8->arm.dap = dap;
2730
2731 /* register arch-specific functions */
2732 armv8->examine_debug_reason = NULL;
2733 armv8->post_debug_entry = aarch64_post_debug_entry;
2734 armv8->pre_restore_context = NULL;
2735 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2736
2737 armv8_init_arch_info(target, armv8);
2738 target_register_timer_callback(aarch64_handle_target_request, 1,
2739 TARGET_TIMER_TYPE_PERIODIC, target);
2740
2741 return ERROR_OK;
2742 }
2743
2744 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2745 {
2746 struct aarch64_private_config *pc = target->private_config;
2747 struct aarch64_common *aarch64;
2748
2749 if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2750 return ERROR_FAIL;
2751
2752 aarch64 = calloc(1, sizeof(struct aarch64_common));
2753 if (aarch64 == NULL) {
2754 LOG_ERROR("Out of memory");
2755 return ERROR_FAIL;
2756 }
2757
2758 return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2759 }
2760
2761 static void aarch64_deinit_target(struct target *target)
2762 {
2763 struct aarch64_common *aarch64 = target_to_aarch64(target);
2764 struct armv8_common *armv8 = &aarch64->armv8_common;
2765 struct arm_dpm *dpm = &armv8->dpm;
2766
2767 armv8_free_reg_cache(target);
2768 free(aarch64->brp_list);
2769 free(dpm->dbp);
2770 free(dpm->dwp);
2771 free(target->private_config);
2772 free(aarch64);
2773 }
2774
2775 static int aarch64_mmu(struct target *target, int *enabled)
2776 {
2777 if (target->state != TARGET_HALTED) {
2778 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2779 return ERROR_TARGET_INVALID;
2780 }
2781
2782 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2783 return ERROR_OK;
2784 }
2785
2786 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2787 target_addr_t *phys)
2788 {
2789 return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2790 }
2791
/*
 * private target configuration items
 */
enum aarch64_cfg_param {
	CFG_CTI,	/* '-cti <name>': cross-trigger interface instance */
};

/* option names accepted by 'configure'/'cget' for this target type */
static const Jim_Nvp nvp_config_opts[] = {
	{ .name = "-cti", .value = CFG_CTI },
	{ .name = NULL, .value = -1 }
};
2803
/* Parse 'configure'/'cget' options private to aarch64 targets.
 * DAP-related options are delegated to adiv5_jim_configure(); the only
 * option handled here is '-cti <name>'.  Returns JIM_OK/JIM_ERR, or
 * JIM_CONTINUE when the topmost option belongs to someone else.
 */
static int aarch64_jim_configure(struct target *target, Jim_GetOptInfo *goi)
{
	struct aarch64_private_config *pc;
	Jim_Nvp *n;
	int e;

	/* lazily allocate the private config on first use */
	pc = (struct aarch64_private_config *)target->private_config;
	if (pc == NULL) {
		pc = calloc(1, sizeof(struct aarch64_private_config));
		pc->adiv5_config.ap_num = DP_APSEL_INVALID;
		target->private_config = pc;
	}

	/*
	 * Call adiv5_jim_configure() to parse the common DAP options
	 * It will return JIM_CONTINUE if it didn't find any known
	 * options, JIM_OK if it correctly parsed the topmost option
	 * and JIM_ERR if an error occurred during parameter evaluation.
	 * For JIM_CONTINUE, we check our own params.
	 *
	 * adiv5_jim_configure() assumes 'private_config' to point to
	 * 'struct adiv5_private_config'. Override 'private_config'!
	 */
	target->private_config = &pc->adiv5_config;
	e = adiv5_jim_configure(target, goi);
	target->private_config = pc;
	if (e != JIM_CONTINUE)
		return e;

	/* parse config or cget options ... */
	if (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);

		/* check first if topmost item is for us */
		e = Jim_Nvp_name2value_obj(goi->interp, nvp_config_opts,
				goi->argv[0], &n);
		if (e != JIM_OK)
			return JIM_CONTINUE;

		/* consume the option name token */
		e = Jim_GetOpt_Obj(goi, NULL);
		if (e != JIM_OK)
			return e;

		switch (n->value) {
		case CFG_CTI: {
			if (goi->isconfigure) {
				/* 'configure -cti <name>': look up and attach the CTI */
				Jim_Obj *o_cti;
				struct arm_cti *cti;
				e = Jim_GetOpt_Obj(goi, &o_cti);
				if (e != JIM_OK)
					return e;
				cti = cti_instance_by_jim_obj(goi->interp, o_cti);
				if (cti == NULL) {
					Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
					return JIM_ERR;
				}
				pc->cti = cti;
			} else {
				/* 'cget -cti': report the configured CTI name */
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}

				if (pc == NULL || pc->cti == NULL) {
					Jim_SetResultString(goi->interp, "CTI not configured", -1);
					return JIM_ERR;
				}
				Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
			}
			break;
		}

		default:
			return JIM_CONTINUE;
		}
	}

	return JIM_OK;
}
2885
2886 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2887 {
2888 struct target *target = get_current_target(CMD_CTX);
2889 struct armv8_common *armv8 = target_to_armv8(target);
2890
2891 return armv8_handle_cache_info_command(CMD,
2892 &armv8->armv8_mmu.armv8_cache);
2893 }
2894
2895 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2896 {
2897 struct target *target = get_current_target(CMD_CTX);
2898 if (!target_was_examined(target)) {
2899 LOG_ERROR("target not examined yet");
2900 return ERROR_FAIL;
2901 }
2902
2903 return aarch64_init_debug_access(target);
2904 }
2905
2906 COMMAND_HANDLER(aarch64_handle_disassemble_command)
2907 {
2908 struct target *target = get_current_target(CMD_CTX);
2909
2910 if (target == NULL) {
2911 LOG_ERROR("No target selected");
2912 return ERROR_FAIL;
2913 }
2914
2915 struct aarch64_common *aarch64 = target_to_aarch64(target);
2916
2917 if (aarch64->common_magic != AARCH64_COMMON_MAGIC) {
2918 command_print(CMD, "current target isn't an AArch64");
2919 return ERROR_FAIL;
2920 }
2921
2922 int count = 1;
2923 target_addr_t address;
2924
2925 switch (CMD_ARGC) {
2926 case 2:
2927 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
2928 /* FALL THROUGH */
2929 case 1:
2930 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
2931 break;
2932 default:
2933 return ERROR_COMMAND_SYNTAX_ERROR;
2934 }
2935
2936 return a64_disassemble(CMD, target, address, count);
2937 }
2938
2939 COMMAND_HANDLER(aarch64_mask_interrupts_command)
2940 {
2941 struct target *target = get_current_target(CMD_CTX);
2942 struct aarch64_common *aarch64 = target_to_aarch64(target);
2943
2944 static const Jim_Nvp nvp_maskisr_modes[] = {
2945 { .name = "off", .value = AARCH64_ISRMASK_OFF },
2946 { .name = "on", .value = AARCH64_ISRMASK_ON },
2947 { .name = NULL, .value = -1 },
2948 };
2949 const Jim_Nvp *n;
2950
2951 if (CMD_ARGC > 0) {
2952 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2953 if (n->name == NULL) {
2954 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2955 return ERROR_COMMAND_SYNTAX_ERROR;
2956 }
2957
2958 aarch64->isrmasking_mode = n->value;
2959 }
2960
2961 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
2962 command_print(CMD, "aarch64 interrupt mask %s", n->name);
2963
2964 return ERROR_OK;
2965 }
2966
2967 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
2968 {
2969 struct command_context *context;
2970 struct target *target;
2971 struct arm *arm;
2972 int retval;
2973 bool is_mcr = false;
2974 int arg_cnt = 0;
2975
2976 if (Jim_CompareStringImmediate(interp, argv[0], "mcr")) {
2977 is_mcr = true;
2978 arg_cnt = 7;
2979 } else {
2980 arg_cnt = 6;
2981 }
2982
2983 context = current_command_context(interp);
2984 assert(context != NULL);
2985
2986 target = get_current_target(context);
2987 if (target == NULL) {
2988 LOG_ERROR("%s: no current target", __func__);
2989 return JIM_ERR;
2990 }
2991 if (!target_was_examined(target)) {
2992 LOG_ERROR("%s: not yet examined", target_name(target));
2993 return JIM_ERR;
2994 }
2995
2996 arm = target_to_arm(target);
2997 if (!is_arm(arm)) {
2998 LOG_ERROR("%s: not an ARM", target_name(target));
2999 return JIM_ERR;
3000 }
3001
3002 if (target->state != TARGET_HALTED)
3003 return ERROR_TARGET_NOT_HALTED;
3004
3005 if (arm->core_state == ARM_STATE_AARCH64) {
3006 LOG_ERROR("%s: not 32-bit arm target", target_name(target));
3007 return JIM_ERR;
3008 }
3009
3010 if (argc != arg_cnt) {
3011 LOG_ERROR("%s: wrong number of arguments", __func__);
3012 return JIM_ERR;
3013 }
3014
3015 int cpnum;
3016 uint32_t op1;
3017 uint32_t op2;
3018 uint32_t CRn;
3019 uint32_t CRm;
3020 uint32_t value;
3021 long l;
3022
3023 /* NOTE: parameter sequence matches ARM instruction set usage:
3024 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
3025 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
3026 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
3027 */
3028 retval = Jim_GetLong(interp, argv[1], &l);
3029 if (retval != JIM_OK)
3030 return retval;
3031 if (l & ~0xf) {
3032 LOG_ERROR("%s: %s %d out of range", __func__,
3033 "coprocessor", (int) l);
3034 return JIM_ERR;
3035 }
3036 cpnum = l;
3037
3038 retval = Jim_GetLong(interp, argv[2], &l);
3039 if (retval != JIM_OK)
3040 return retval;
3041 if (l & ~0x7) {
3042 LOG_ERROR("%s: %s %d out of range", __func__,
3043 "op1", (int) l);
3044 return JIM_ERR;
3045 }
3046 op1 = l;
3047
3048 retval = Jim_GetLong(interp, argv[3], &l);
3049 if (retval != JIM_OK)
3050 return retval;
3051 if (l & ~0xf) {
3052 LOG_ERROR("%s: %s %d out of range", __func__,
3053 "CRn", (int) l);
3054 return JIM_ERR;
3055 }
3056 CRn = l;
3057
3058 retval = Jim_GetLong(interp, argv[4], &l);
3059 if (retval != JIM_OK)
3060 return retval;
3061 if (l & ~0xf) {
3062 LOG_ERROR("%s: %s %d out of range", __func__,
3063 "CRm", (int) l);
3064 return JIM_ERR;
3065 }
3066 CRm = l;
3067
3068 retval = Jim_GetLong(interp, argv[5], &l);
3069 if (retval != JIM_OK)
3070 return retval;
3071 if (l & ~0x7) {
3072 LOG_ERROR("%s: %s %d out of range", __func__,
3073 "op2", (int) l);
3074 return JIM_ERR;
3075 }
3076 op2 = l;
3077
3078 value = 0;
3079
3080 if (is_mcr == true) {
3081 retval = Jim_GetLong(interp, argv[6], &l);
3082 if (retval != JIM_OK)
3083 return retval;
3084 value = l;
3085
3086 /* NOTE: parameters reordered! */
3087 /* ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2) */
3088 retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
3089 if (retval != ERROR_OK)
3090 return JIM_ERR;
3091 } else {
3092 /* NOTE: parameters reordered! */
3093 /* ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2) */
3094 retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
3095 if (retval != ERROR_OK)
3096 return JIM_ERR;
3097
3098 Jim_SetResult(interp, Jim_NewIntObj(interp, value));
3099 }
3100
3101 return JIM_OK;
3102 }
3103
/* commands registered under the 'aarch64' command group */
static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "disassemble",
		.handler = aarch64_handle_disassemble_command,
		.mode = COMMAND_EXEC,
		.help = "Disassemble instructions",
		.usage = "address [count]",
	},
	{
		.name = "maskisr",
		.handler = aarch64_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask aarch64 interrupts during single-step",
		.usage = "['on'|'off']",
	},
	{
		.name = "mcr",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "write coprocessor register",
		.usage = "cpnum op1 CRn CRm op2 value",
	},
	{
		.name = "mrc",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "read coprocessor register",
		.usage = "cpnum op1 CRn CRm op2",
	},
	{
		/* pull in the shared SMP sub-commands */
		.chain = smp_command_handlers,
	},


	COMMAND_REGISTRATION_DONE
};
3154
3155 extern const struct command_registration semihosting_common_handlers[];
3156
3157 static const struct command_registration aarch64_command_handlers[] = {
3158 {
3159 .name = "arm",
3160 .mode = COMMAND_ANY,
3161 .help = "ARM Command Group",
3162 .usage = "",
3163 .chain = semihosting_common_handlers
3164 },
3165 {
3166 .chain = armv8_command_handlers,
3167 },
3168 {
3169 .name = "aarch64",
3170 .mode = COMMAND_ANY,
3171 .help = "Aarch64 command group",
3172 .usage = "",
3173 .chain = aarch64_exec_command_handlers,
3174 },
3175 COMMAND_REGISTRATION_DONE
3176 };
3177
/* OpenOCD target driver vtable for AArch64 cores: maps the generic
 * target operations onto the aarch64_* handlers in this file plus the
 * shared armv8_* helpers. */
struct target_type aarch64_target = {
	.name = "aarch64",

	/* state polling and run control */
	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	/* reset handling */
	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = armv8_get_gdb_arch,
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	/* virtual-address memory access */
	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	/* breakpoint and watchpoint management */
	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = aarch64_add_watchpoint,
	.remove_watchpoint = aarch64_remove_watchpoint,
	.hit_watchpoint = aarch64_hit_watchpoint,

	/* commands and target lifecycle (create/configure/examine) */
	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.target_jim_configure = aarch64_jim_configure,
	.init_target = aarch64_init_target,
	.deinit_target = aarch64_deinit_target,
	.examine = aarch64_examine,

	/* physical-address access and virtual-to-physical translation */
	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to reach this page again; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)