aarch64: speed up first examination
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
33 enum restart_mode {
34 RESTART_LAZY,
35 RESTART_SYNC,
36 };
37
38 enum halt_mode {
39 HALT_LAZY,
40 HALT_SYNC,
41 };
42
43 static int aarch64_poll(struct target *target);
44 static int aarch64_debug_entry(struct target *target);
45 static int aarch64_restore_context(struct target *target, bool bpwp);
46 static int aarch64_set_breakpoint(struct target *target,
47 struct breakpoint *breakpoint, uint8_t matchmode);
48 static int aarch64_set_context_breakpoint(struct target *target,
49 struct breakpoint *breakpoint, uint8_t matchmode);
50 static int aarch64_set_hybrid_breakpoint(struct target *target,
51 struct breakpoint *breakpoint);
52 static int aarch64_unset_breakpoint(struct target *target,
53 struct breakpoint *breakpoint);
54 static int aarch64_mmu(struct target *target, int *enabled);
55 static int aarch64_virt2phys(struct target *target,
56 target_addr_t virt, target_addr_t *phys);
57 static int aarch64_read_cpu_memory(struct target *target,
58 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
59
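/* iterate over all targets of an SMP group; 'pos' walks the group's linked target list starting at 'head' */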
60 #define foreach_smp_target(pos, head) \
61 for (pos = head; (pos != NULL); pos = pos->next)
62
63 static int aarch64_restore_system_control_reg(struct target *target)
64 {
65 enum arm_mode target_mode = ARM_MODE_ANY;
66 int retval = ERROR_OK;
67 uint32_t instr;
68
69 struct aarch64_common *aarch64 = target_to_aarch64(target);
70 struct armv8_common *armv8 = target_to_armv8(target);
71
72 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
73 aarch64->system_control_reg_curr = aarch64->system_control_reg;
74 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
75
76 switch (armv8->arm.core_mode) {
77 case ARMV8_64_EL0T:
78 target_mode = ARMV8_64_EL1H;
79 /* fall through */
80 case ARMV8_64_EL1T:
81 case ARMV8_64_EL1H:
82 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
83 break;
84 case ARMV8_64_EL2T:
85 case ARMV8_64_EL2H:
86 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
87 break;
88 case ARMV8_64_EL3H:
89 case ARMV8_64_EL3T:
90 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
91 break;
92
93 case ARM_MODE_SVC:
94 case ARM_MODE_ABT:
95 case ARM_MODE_FIQ:
96 case ARM_MODE_IRQ:
97 instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
98 break;
99
100 default:
101 LOG_INFO("cannot write system control register in this mode");
102 return ERROR_FAIL;
103 }
104
105 if (target_mode != ARM_MODE_ANY)
106 armv8_dpm_modeswitch(&armv8->dpm, target_mode);
107
108 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
109 if (retval != ERROR_OK)
110 return retval;
111
112 if (target_mode != ARM_MODE_ANY)
113 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
114 }
115
116 return retval;
117 }
118
119 /* modify system_control_reg in order to enable or disable the MMU for:
120 * - virt2phys address conversion
121 * - reading or writing memory at physical or virtual addresses */
122 static int aarch64_mmu_modify(struct target *target, int enable)
123 {
124 struct aarch64_common *aarch64 = target_to_aarch64(target);
125 struct armv8_common *armv8 = &aarch64->armv8_common;
126 int retval = ERROR_OK;
127 uint32_t instr = 0;
128
129 if (enable) {
130 /* the MMU can only be re-enabled if it was enabled when the target stopped */
131 if (!(aarch64->system_control_reg & 0x1U)) {
132 LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
133 return ERROR_FAIL;
134 }
135 if (!(aarch64->system_control_reg_curr & 0x1U))
136 aarch64->system_control_reg_curr |= 0x1U;
137 } else {
138 if (aarch64->system_control_reg_curr & 0x4U) {
139 /* data cache is active */
140 aarch64->system_control_reg_curr &= ~0x4U;
141 /* flush data cache armv8 function to be called */
142 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
143 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
144 }
145 if ((aarch64->system_control_reg_curr & 0x1U)) {
146 aarch64->system_control_reg_curr &= ~0x1U;
147 }
148 }
149
150 switch (armv8->arm.core_mode) {
151 case ARMV8_64_EL0T:
152 case ARMV8_64_EL1T:
153 case ARMV8_64_EL1H:
154 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
155 break;
156 case ARMV8_64_EL2T:
157 case ARMV8_64_EL2H:
158 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
159 break;
160 case ARMV8_64_EL3H:
161 case ARMV8_64_EL3T:
162 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
163 break;
164 default:
165 LOG_DEBUG("unknown cpu state 0x%x" PRIx32, armv8->arm.core_state);
166 break;
167 }
168
169 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
170 aarch64->system_control_reg_curr);
171 return retval;
172 }
173
174 /*
175 * Basic debug access; very low level, assumes state is saved
176 */
177 static int aarch64_init_debug_access(struct target *target)
178 {
179 struct armv8_common *armv8 = target_to_armv8(target);
180 int retval;
181 uint32_t dummy;
182
183 LOG_DEBUG("%s", target_name(target));
184
185 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
186 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
187 if (retval != ERROR_OK) {
188 LOG_DEBUG("Examine %s failed", "oslock");
189 return retval;
190 }
191
192 /* Clear Sticky Power Down status Bit in PRSR to enable access to
193 the registers in the Core Power Domain */
194 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
195 armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
196 if (retval != ERROR_OK)
197 return retval;
198
199 /*
200 * Static CTI configuration:
201 * Channel 0 -> trigger outputs HALT request to PE
202 * Channel 1 -> trigger outputs Resume request to PE
203 * Gate all channel trigger events from entering the CTM
204 */
205
206 /* Enable CTI */
207 retval = arm_cti_enable(armv8->cti, true);
208 /* By default, gate all channel events to and from the CTM */
209 if (retval == ERROR_OK)
210 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
211 /* output halt requests to PE on channel 0 event */
212 if (retval == ERROR_OK)
213 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
214 /* output restart requests to PE on channel 1 event */
215 if (retval == ERROR_OK)
216 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
217 if (retval != ERROR_OK)
218 return retval;
219
220 /* Resync breakpoint registers */
221
222 return ERROR_OK;
223 }
224
225 /* Write to memory mapped registers directly with no cache or mmu handling */
226 static int aarch64_dap_write_memap_register_u32(struct target *target,
227 uint32_t address,
228 uint32_t value)
229 {
230 int retval;
231 struct armv8_common *armv8 = target_to_armv8(target);
232
233 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
234
235 return retval;
236 }
237
238 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
239 {
240 struct arm_dpm *dpm = &a8->armv8_common.dpm;
241 int retval;
242
243 dpm->arm = &a8->armv8_common.arm;
244 dpm->didr = debug;
245
246 retval = armv8_dpm_setup(dpm);
247 if (retval == ERROR_OK)
248 retval = armv8_dpm_initialize(dpm);
249
250 return retval;
251 }
252
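/* helper: set or clear bits in the DSCR debug status and control register */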
253 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
254 {
255 struct armv8_common *armv8 = target_to_armv8(target);
256 return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
257 }
258
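/* read PRSR once and check whether (PRSR & mask) == (val & mask); the raw PRSR value is passed back through p_prsr if requested */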
259 static int aarch64_check_state_one(struct target *target,
260 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
261 {
262 struct armv8_common *armv8 = target_to_armv8(target);
263 uint32_t prsr;
264 int retval;
265
266 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
267 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
268 if (retval != ERROR_OK)
269 return retval;
270
271 if (p_prsr)
272 *p_prsr = prsr;
273
274 if (p_result)
275 *p_result = (prsr & mask) == (val & mask);
276
277 return ERROR_OK;
278 }
279
280 static int aarch64_wait_halt_one(struct target *target)
281 {
282 int retval = ERROR_OK;
283 uint32_t prsr;
284
285 int64_t then = timeval_ms();
286 for (;;) {
287 int halted;
288
289 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
290 if (retval != ERROR_OK || halted)
291 break;
292
293 if (timeval_ms() > then + 1000) {
294 retval = ERROR_TARGET_TIMEOUT;
295 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
296 break;
297 }
298 }
299 return retval;
300 }
301
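/* prepare all running targets of an SMP group for halting: enable halting debug mode and ungate CTI channel 0 so a single halt request reaches every PE; a suitable target to halt first is reported through p_first */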
302 static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
303 {
304 int retval = ERROR_OK;
305 struct target_list *head = target->head;
306 struct target *first = NULL;
307
308 LOG_DEBUG("target %s exc %i", target_name(target), exc_target);
309
310 while (head != NULL) {
311 struct target *curr = head->target;
312 struct armv8_common *armv8 = target_to_armv8(curr);
313 head = head->next;
314
315 if (exc_target && curr == target)
316 continue;
317 if (!target_was_examined(curr))
318 continue;
319 if (curr->state != TARGET_RUNNING)
320 continue;
321
322 /* HACK: mark this target as prepared for halting */
323 curr->debug_reason = DBG_REASON_DBGRQ;
324
325 /* open the gate for channel 0 to let HALT requests pass to the CTM */
326 retval = arm_cti_ungate_channel(armv8->cti, 0);
327 if (retval == ERROR_OK)
328 retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
329 if (retval != ERROR_OK)
330 break;
331
332 LOG_DEBUG("target %s prepared", target_name(curr));
333
334 if (first == NULL)
335 first = curr;
336 }
337
338 if (p_first) {
339 if (exc_target && first)
340 *p_first = first;
341 else
342 *p_first = target;
343 }
344
345 return retval;
346 }
347
348 static int aarch64_halt_one(struct target *target, enum halt_mode mode)
349 {
350 int retval = ERROR_OK;
351 struct armv8_common *armv8 = target_to_armv8(target);
352
353 LOG_DEBUG("%s", target_name(target));
354
355 /* allow Halting Debug Mode */
356 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
357 if (retval != ERROR_OK)
358 return retval;
359
360 /* trigger an event on channel 0, this outputs a halt request to the PE */
361 retval = arm_cti_pulse_channel(armv8->cti, 0);
362 if (retval != ERROR_OK)
363 return retval;
364
365 if (mode == HALT_SYNC) {
366 retval = aarch64_wait_halt_one(target);
367 if (retval != ERROR_OK) {
368 if (retval == ERROR_TARGET_TIMEOUT)
369 LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
370 return retval;
371 }
372 }
373
374 return ERROR_OK;
375 }
376
377 static int aarch64_halt_smp(struct target *target, bool exc_target)
378 {
379 struct target *next = target;
380 int retval;
381
382 /* prepare halt on all PEs of the group */
383 retval = aarch64_prepare_halt_smp(target, exc_target, &next);
384
385 if (exc_target && next == target)
386 return retval;
387
388 /* halt the target PE */
389 if (retval == ERROR_OK)
390 retval = aarch64_halt_one(next, HALT_LAZY);
391
392 if (retval != ERROR_OK)
393 return retval;
394
395 /* wait for all PEs to halt */
396 int64_t then = timeval_ms();
397 for (;;) {
398 bool all_halted = true;
399 struct target_list *head;
400 struct target *curr;
401
402 foreach_smp_target(head, target->head) {
403 int halted;
404
405 curr = head->target;
406
407 if (!target_was_examined(curr))
408 continue;
409
410 retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
411 if (retval != ERROR_OK || !halted) {
412 all_halted = false;
413 break;
414 }
415 }
416
417 if (all_halted)
418 break;
419
420 if (timeval_ms() > then + 1000) {
421 retval = ERROR_TARGET_TIMEOUT;
422 break;
423 }
424
425 /*
426 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
427 * and it looks like the CTIs are not connected by a common
428 * trigger matrix. It seems that we need to halt one core in each
429 * cluster explicitly. So if we find that a core has not halted
430 * yet, we trigger an explicit halt for the second cluster.
431 */
432 retval = aarch64_halt_one(curr, HALT_LAZY);
433 if (retval != ERROR_OK)
434 break;
435 }
436
437 return retval;
438 }
439
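/* bring the remaining SMP group members up to date after a halt; the target serving GDB is polled last so it sees the others already halted */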
440 static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
441 {
442 struct target *gdb_target = NULL;
443 struct target_list *head;
444 struct target *curr;
445
446 if (debug_reason == DBG_REASON_NOTHALTED) {
447 LOG_INFO("Halting remaining targets in SMP group");
448 aarch64_halt_smp(target, true);
449 }
450
451 /* poll all targets in the group, but skip the target that serves GDB */
452 foreach_smp_target(head, target->head) {
453 curr = head->target;
454 /* skip calling context */
455 if (curr == target)
456 continue;
457 if (!target_was_examined(curr))
458 continue;
459 /* skip targets that were already halted */
460 if (curr->state == TARGET_HALTED)
461 continue;
462 /* remember the gdb_service->target */
463 if (curr->gdb_service != NULL)
464 gdb_target = curr->gdb_service->target;
465 /* skip it */
466 if (curr == gdb_target)
467 continue;
468
469 /* avoid recursion in aarch64_poll() */
470 curr->smp = 0;
471 aarch64_poll(curr);
472 curr->smp = 1;
473 }
474
475 /* after all targets were updated, poll the gdb serving target */
476 if (gdb_target != NULL && gdb_target != target)
477 aarch64_poll(gdb_target);
478
479 return ERROR_OK;
480 }
481
482 /*
483 * Aarch64 Run control
484 */
485
486 static int aarch64_poll(struct target *target)
487 {
488 enum target_state prev_target_state;
489 int retval = ERROR_OK;
490 int halted;
491
492 retval = aarch64_check_state_one(target,
493 PRSR_HALT, PRSR_HALT, &halted, NULL);
494 if (retval != ERROR_OK)
495 return retval;
496
497 if (halted) {
498 prev_target_state = target->state;
499 if (prev_target_state != TARGET_HALTED) {
500 enum target_debug_reason debug_reason = target->debug_reason;
501
502 /* We have a halting debug event */
503 target->state = TARGET_HALTED;
504 LOG_DEBUG("Target %s halted", target_name(target));
505 retval = aarch64_debug_entry(target);
506 if (retval != ERROR_OK)
507 return retval;
508
509 if (target->smp)
510 update_halt_gdb(target, debug_reason);
511
512 switch (prev_target_state) {
513 case TARGET_RUNNING:
514 case TARGET_UNKNOWN:
515 case TARGET_RESET:
516 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
517 break;
518 case TARGET_DEBUG_RUNNING:
519 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
520 break;
521 default:
522 break;
523 }
524 }
525 } else
526 target->state = TARGET_RUNNING;
527
528 return retval;
529 }
530
531 static int aarch64_halt(struct target *target)
532 {
533 if (target->smp)
534 return aarch64_halt_smp(target, false);
535
536 return aarch64_halt_one(target, HALT_SYNC);
537 }
538
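/* restore one target's register context and program counter before restart; current = 1 resumes at the current PC, otherwise at *address */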
539 static int aarch64_restore_one(struct target *target, int current,
540 uint64_t *address, int handle_breakpoints, int debug_execution)
541 {
542 struct armv8_common *armv8 = target_to_armv8(target);
543 struct arm *arm = &armv8->arm;
544 int retval;
545 uint64_t resume_pc;
546
547 LOG_DEBUG("%s", target_name(target));
548
549 if (!debug_execution)
550 target_free_all_working_areas(target);
551
552 /* current = 1: continue on current pc, otherwise continue at <address> */
553 resume_pc = buf_get_u64(arm->pc->value, 0, 64);
554 if (!current)
555 resume_pc = *address;
556 else
557 *address = resume_pc;
558
559 /* Make sure that the ARMv7 GDB Thumb fixups do not
560 * kill the return address
561 */
562 switch (arm->core_state) {
563 case ARM_STATE_ARM:
564 resume_pc &= 0xFFFFFFFC;
565 break;
566 case ARM_STATE_AARCH64:
567 resume_pc &= 0xFFFFFFFFFFFFFFFC;
568 break;
569 case ARM_STATE_THUMB:
570 case ARM_STATE_THUMB_EE:
571 /* When the return address is loaded into PC
572 * bit 0 must be 1 to stay in Thumb state
573 */
574 resume_pc |= 0x1;
575 break;
576 case ARM_STATE_JAZELLE:
577 LOG_ERROR("How do I resume into Jazelle state??");
578 return ERROR_FAIL;
579 }
580 LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
581 buf_set_u64(arm->pc->value, 0, 64, resume_pc);
582 arm->pc->dirty = 1;
583 arm->pc->valid = 1;
584
585 /* call this now, before restoring the context, because it uses CPU
586 * register r0 to restore the system control register */
587 retval = aarch64_restore_system_control_reg(target);
588 if (retval == ERROR_OK)
589 retval = aarch64_restore_context(target, handle_breakpoints);
590
591 return retval;
592 }
593
594 /**
595 * prepare a single target for restart
596 */
599 static int aarch64_prepare_restart_one(struct target *target)
600 {
601 struct armv8_common *armv8 = target_to_armv8(target);
602 int retval;
603 uint32_t dscr;
604 uint32_t tmp;
605
606 LOG_DEBUG("%s", target_name(target));
607
608 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
609 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
610 if (retval != ERROR_OK)
611 return retval;
612
613 if ((dscr & DSCR_ITE) == 0)
614 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
615 if ((dscr & DSCR_ERR) != 0)
616 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
617
618 /* acknowledge a pending CTI halt event */
619 retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
620 /*
621 * open the CTI gate for channel 1 so that the restart events
622 * get passed along to all PEs. Also close gate for channel 0
623 * to isolate the PE from halt events.
624 */
625 if (retval == ERROR_OK)
626 retval = arm_cti_ungate_channel(armv8->cti, 1);
627 if (retval == ERROR_OK)
628 retval = arm_cti_gate_channel(armv8->cti, 0);
629
630 /* make sure that DSCR.HDE is set */
631 if (retval == ERROR_OK) {
632 dscr |= DSCR_HDE;
633 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
634 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
635 }
636
637 /* clear sticky bits in PRSR, SDR is now 0 */
638 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
639 armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
640
641 return retval;
642 }
643
644 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
645 {
646 struct armv8_common *armv8 = target_to_armv8(target);
647 int retval;
648
649 LOG_DEBUG("%s", target_name(target));
650
651 /* trigger an event on channel 1, generates a restart request to the PE */
652 retval = arm_cti_pulse_channel(armv8->cti, 1);
653 if (retval != ERROR_OK)
654 return retval;
655
656 if (mode == RESTART_SYNC) {
657 int64_t then = timeval_ms();
658 for (;;) {
659 int resumed;
660 /*
661 * if PRSR.SDR is set now, the target did restart, even
662 * if it's now already halted again (e.g. due to breakpoint)
663 */
664 retval = aarch64_check_state_one(target,
665 PRSR_SDR, PRSR_SDR, &resumed, NULL);
666 if (retval != ERROR_OK || resumed)
667 break;
668
669 if (timeval_ms() > then + 1000) {
670 LOG_ERROR("%s: Timeout waiting for resume", target_name(target));
671 retval = ERROR_TARGET_TIMEOUT;
672 break;
673 }
674 }
675 }
676
677 if (retval != ERROR_OK)
678 return retval;
679
680 target->debug_reason = DBG_REASON_NOTHALTED;
681 target->state = TARGET_RUNNING;
682
683 return ERROR_OK;
684 }
685
686 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
687 {
688 int retval;
689
690 LOG_DEBUG("%s", target_name(target));
691
692 retval = aarch64_prepare_restart_one(target);
693 if (retval == ERROR_OK)
694 retval = aarch64_do_restart_one(target, mode);
695
696 return retval;
697 }
698
699 /*
700 * prepare all but the current target for restart
701 */
702 static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
703 {
704 int retval = ERROR_OK;
705 struct target_list *head;
706 struct target *first = NULL;
707 uint64_t address;
708
709 foreach_smp_target(head, target->head) {
710 struct target *curr = head->target;
711
712 /* skip calling target */
713 if (curr == target)
714 continue;
715 if (!target_was_examined(curr))
716 continue;
717 if (curr->state != TARGET_HALTED)
718 continue;
719
720 /* resume at current address, not in step mode */
721 retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
722 if (retval == ERROR_OK)
723 retval = aarch64_prepare_restart_one(curr);
724 if (retval != ERROR_OK) {
725 LOG_ERROR("failed to restore target %s", target_name(curr));
726 break;
727 }
728 /* remember the first valid target in the group */
729 if (first == NULL)
730 first = curr;
731 }
732
733 if (p_first)
734 *p_first = first;
735
736 return retval;
737 }
738
739
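/* restart all other members of the SMP group before single-stepping the current target, then wait for them to leave the halted state */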
740 static int aarch64_step_restart_smp(struct target *target)
741 {
742 int retval = ERROR_OK;
743 struct target_list *head;
744 struct target *first = NULL;
745
746 LOG_DEBUG("%s", target_name(target));
747
748 retval = aarch64_prep_restart_smp(target, 0, &first);
749 if (retval != ERROR_OK)
750 return retval;
751
752 if (first != NULL)
753 retval = aarch64_do_restart_one(first, RESTART_LAZY);
754 if (retval != ERROR_OK) {
755 LOG_DEBUG("error restarting target %s", target_name(first));
756 return retval;
757 }
758
759 int64_t then = timeval_ms();
760 for (;;) {
761 struct target *curr = target;
762 bool all_resumed = true;
763
764 foreach_smp_target(head, target->head) {
765 uint32_t prsr;
766 int resumed;
767
768 curr = head->target;
769
770 if (curr == target)
771 continue;
772
773 if (!target_was_examined(curr))
774 continue;
775
776 retval = aarch64_check_state_one(curr,
777 PRSR_SDR, PRSR_SDR, &resumed, &prsr);
778 if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
779 all_resumed = false;
780 break;
781 }
782
783 if (curr->state != TARGET_RUNNING) {
784 curr->state = TARGET_RUNNING;
785 curr->debug_reason = DBG_REASON_NOTHALTED;
786 target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
787 }
788 }
789
790 if (all_resumed)
791 break;
792
793 if (timeval_ms() > then + 1000) {
794 LOG_ERROR("%s: timeout waiting for target resume", __func__);
795 retval = ERROR_TARGET_TIMEOUT;
796 break;
797 }
798 /*
799 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
800 * and it looks like the CTIs are not connected by a common
801 * trigger matrix. It seems that we need to start one core in each
802 * cluster explicitly. So if we find that a core has not resumed
803 * yet, we trigger an explicit resume for the second cluster.
804 */
805 retval = aarch64_do_restart_one(curr, RESTART_LAZY);
806 if (retval != ERROR_OK)
807 break;
808 }
809
810 return retval;
811 }
812
813 static int aarch64_resume(struct target *target, int current,
814 target_addr_t address, int handle_breakpoints, int debug_execution)
815 {
816 int retval = 0;
817 uint64_t addr = address;
818
819 if (target->state != TARGET_HALTED)
820 return ERROR_TARGET_NOT_HALTED;
821
822 /*
823 * If this target is part of an SMP group, prepare the other
824 * targets for resuming. This involves restoring the complete
825 * target register context and setting up CTI gates to accept
826 * resume events from the trigger matrix.
827 */
828 if (target->smp) {
829 retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
830 if (retval != ERROR_OK)
831 return retval;
832 }
833
834 /* all targets prepared, restore and restart the current target */
835 retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
836 debug_execution);
837 if (retval == ERROR_OK)
838 retval = aarch64_restart_one(target, RESTART_SYNC);
839 if (retval != ERROR_OK)
840 return retval;
841
842 if (target->smp) {
843 int64_t then = timeval_ms();
844 for (;;) {
845 struct target *curr = target;
846 struct target_list *head;
847 bool all_resumed = true;
848
849 foreach_smp_target(head, target->head) {
850 uint32_t prsr;
851 int resumed;
852
853 curr = head->target;
854 if (curr == target)
855 continue;
856 if (!target_was_examined(curr))
857 continue;
858
859 retval = aarch64_check_state_one(curr,
860 PRSR_SDR, PRSR_SDR, &resumed, &prsr);
861 if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
862 all_resumed = false;
863 break;
864 }
865
866 if (curr->state != TARGET_RUNNING) {
867 curr->state = TARGET_RUNNING;
868 curr->debug_reason = DBG_REASON_NOTHALTED;
869 target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
870 }
871 }
872
873 if (all_resumed)
874 break;
875
876 if (timeval_ms() > then + 1000) {
877 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
878 retval = ERROR_TARGET_TIMEOUT;
879 break;
880 }
881
882 /*
883 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
884 * and it looks like the CTIs are not connected by a common
885 * trigger matrix. It seems that we need to start one core in each
886 * cluster explicitly. So if we find that a core has not resumed
887 * yet, we trigger an explicit resume for the second cluster.
888 */
889 retval = aarch64_do_restart_one(curr, RESTART_LAZY);
890 if (retval != ERROR_OK)
891 break;
892 }
893 }
894
895 if (retval != ERROR_OK)
896 return retval;
897
898 target->debug_reason = DBG_REASON_NOTHALTED;
899
900 if (!debug_execution) {
901 target->state = TARGET_RUNNING;
902 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
903 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
904 } else {
905 target->state = TARGET_DEBUG_RUNNING;
906 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
907 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
908 }
909
910 return ERROR_OK;
911 }
912
913 static int aarch64_debug_entry(struct target *target)
914 {
915 int retval = ERROR_OK;
916 struct armv8_common *armv8 = target_to_armv8(target);
917 struct arm_dpm *dpm = &armv8->dpm;
918 enum arm_state core_state;
919 uint32_t dscr;
920
921 /* make sure to clear all sticky errors */
922 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
923 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
924 if (retval == ERROR_OK)
925 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
926 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
927 if (retval == ERROR_OK)
928 retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
929
930 if (retval != ERROR_OK)
931 return retval;
932
933 LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);
934
935 dpm->dscr = dscr;
936 core_state = armv8_dpm_get_core_state(dpm);
937 armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
938 armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
939
940 /* close the CTI gate for all events */
941 if (retval == ERROR_OK)
942 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
943 /* discard async exceptions */
944 if (retval == ERROR_OK)
945 retval = dpm->instr_cpsr_sync(dpm);
946 if (retval != ERROR_OK)
947 return retval;
948
949 /* Examine debug reason */
950 armv8_dpm_report_dscr(dpm, dscr);
951
952 /* save address of instruction that triggered the watchpoint? */
953 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
954 uint32_t tmp;
955 uint64_t wfar = 0;
956
957 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
958 armv8->debug_base + CPUV8_DBG_WFAR1,
959 &tmp);
960 if (retval != ERROR_OK)
961 return retval;
962 wfar = tmp;
963 wfar = (wfar << 32);
964 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
965 armv8->debug_base + CPUV8_DBG_WFAR0,
966 &tmp);
967 if (retval != ERROR_OK)
968 return retval;
969 wfar |= tmp;
970 armv8_dpm_report_wfar(&armv8->dpm, wfar);
971 }
972
973 retval = armv8_dpm_read_current_registers(&armv8->dpm);
974
975 if (retval == ERROR_OK && armv8->post_debug_entry)
976 retval = armv8->post_debug_entry(target);
977
978 return retval;
979 }
980
981 static int aarch64_post_debug_entry(struct target *target)
982 {
983 struct aarch64_common *aarch64 = target_to_aarch64(target);
984 struct armv8_common *armv8 = &aarch64->armv8_common;
985 int retval;
986 enum arm_mode target_mode = ARM_MODE_ANY;
987 uint32_t instr;
988
989 switch (armv8->arm.core_mode) {
990 case ARMV8_64_EL0T:
991 target_mode = ARMV8_64_EL1H;
992 /* fall through */
993 case ARMV8_64_EL1T:
994 case ARMV8_64_EL1H:
995 instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
996 break;
997 case ARMV8_64_EL2T:
998 case ARMV8_64_EL2H:
999 instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
1000 break;
1001 case ARMV8_64_EL3H:
1002 case ARMV8_64_EL3T:
1003 instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
1004 break;
1005
1006 case ARM_MODE_SVC:
1007 case ARM_MODE_ABT:
1008 case ARM_MODE_FIQ:
1009 case ARM_MODE_IRQ:
1010 instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1011 break;
1012
1013 default:
1014 LOG_INFO("cannot read system control register in this mode");
1015 return ERROR_FAIL;
1016 }
1017
1018 if (target_mode != ARM_MODE_ANY)
1019 armv8_dpm_modeswitch(&armv8->dpm, target_mode);
1020
1021 retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
1022 if (retval != ERROR_OK)
1023 return retval;
1024
1025 if (target_mode != ARM_MODE_ANY)
1026 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
1027
1028 LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
1029 aarch64->system_control_reg_curr = aarch64->system_control_reg;
1030
1031 if (armv8->armv8_mmu.armv8_cache.info == -1) {
1032 armv8_identify_cache(armv8);
1033 armv8_read_mpidr(armv8);
1034 }
1035
1036 armv8->armv8_mmu.mmu_enabled =
1037 (aarch64->system_control_reg & 0x1U) ? 1 : 0;
1038 armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
1039 (aarch64->system_control_reg & 0x4U) ? 1 : 0;
1040 armv8->armv8_mmu.armv8_cache.i_cache_enabled =
1041 (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
1042 return ERROR_OK;
1043 }
1044
1045 /*
1046 * single-step a target
1047 */
1048 static int aarch64_step(struct target *target, int current, target_addr_t address,
1049 int handle_breakpoints)
1050 {
1051 struct armv8_common *armv8 = target_to_armv8(target);
1052 struct aarch64_common *aarch64 = target_to_aarch64(target);
1053 int saved_retval = ERROR_OK;
1054 int retval;
1055 uint32_t edecr;
1056
1057 if (target->state != TARGET_HALTED) {
1058 LOG_WARNING("target not halted");
1059 return ERROR_TARGET_NOT_HALTED;
1060 }
1061
1062 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1063 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1064 /* make sure EDECR.SS is not set when restoring the register */
1065
1066 if (retval == ERROR_OK) {
1067 edecr &= ~0x4;
1068 /* set EDECR.SS to enter hardware step mode */
1069 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1070 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1071 }
1072 /* disable interrupts while stepping */
1073 if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
1074 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1075 /* bail out if stepping setup has failed */
1076 if (retval != ERROR_OK)
1077 return retval;
1078
1079 if (target->smp && !handle_breakpoints) {
1080 /*
1081 * isolate current target so that it doesn't get resumed
1082 * together with the others
1083 */
1084 retval = arm_cti_gate_channel(armv8->cti, 1);
1085 /* resume all other targets in the group */
1086 if (retval == ERROR_OK)
1087 retval = aarch64_step_restart_smp(target);
1088 if (retval != ERROR_OK) {
1089 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1090 return retval;
1091 }
1092 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1093 }
1094
1095 /* all other targets running, restore and restart the current target */
1096 retval = aarch64_restore_one(target, current, &address, 0, 0);
1097 if (retval == ERROR_OK)
1098 retval = aarch64_restart_one(target, RESTART_LAZY);
1099
1100 if (retval != ERROR_OK)
1101 return retval;
1102
1103 LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
1104 if (!handle_breakpoints)
1105 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1106
1107 int64_t then = timeval_ms();
1108 for (;;) {
1109 int stepped;
1110 uint32_t prsr;
1111
1112 retval = aarch64_check_state_one(target,
1113 PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
1114 if (retval != ERROR_OK || stepped)
1115 break;
1116
1117 if (timeval_ms() > then + 100) {
1118 LOG_ERROR("timeout waiting for target %s halt after step",
1119 target_name(target));
1120 retval = ERROR_TARGET_TIMEOUT;
1121 break;
1122 }
1123 }
1124
1125 /*
1126 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1127 * causes a timeout. The core takes the step but doesn't complete it and so
1128 * debug state is never entered. However, you can manually halt the core
1129 * because an external debug request is also a WFI wakeup event.
1130 */
1131 if (retval == ERROR_TARGET_TIMEOUT)
1132 saved_retval = aarch64_halt_one(target, HALT_SYNC);
1133
1134 /* restore EDECR */
1135 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1136 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1137 if (retval != ERROR_OK)
1138 return retval;
1139
1140 /* restore interrupts */
1141 if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
1142 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1143 if (retval != ERROR_OK)
1144 return retval;
1145 }
1146
1147 if (saved_retval != ERROR_OK)
1148 return saved_retval;
1149
1150 return aarch64_poll(target);
1151 }
1152
1153 static int aarch64_restore_context(struct target *target, bool bpwp)
1154 {
1155 struct armv8_common *armv8 = target_to_armv8(target);
1156 struct arm *arm = &armv8->arm;
1157
1158 int retval;
1159
1160 LOG_DEBUG("%s", target_name(target));
1161
1162 if (armv8->pre_restore_context)
1163 armv8->pre_restore_context(target);
1164
1165 retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1166 if (retval == ERROR_OK) {
1167 /* registers are now invalid */
1168 register_cache_invalidate(arm->core_cache);
1169 register_cache_invalidate(arm->core_cache->next);
1170 }
1171
1172 return retval;
1173 }
1174
1175 /*
1176 * AArch64 breakpoint and watchpoint functions
1177 */
1178
1179 /* Setup hardware Breakpoint Register Pair */
1180 static int aarch64_set_breakpoint(struct target *target,
1181 struct breakpoint *breakpoint, uint8_t matchmode)
1182 {
1183 int retval;
1184 int brp_i = 0;
1185 uint32_t control;
1186 uint8_t byte_addr_select = 0x0F;
1187 struct aarch64_common *aarch64 = target_to_aarch64(target);
1188 struct armv8_common *armv8 = &aarch64->armv8_common;
1189 struct aarch64_brp *brp_list = aarch64->brp_list;
1190
1191 if (breakpoint->set) {
1192 LOG_WARNING("breakpoint already set");
1193 return ERROR_OK;
1194 }
1195
1196 if (breakpoint->type == BKPT_HARD) {
1197 int64_t bpt_value;
1198 while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
1199 brp_i++;
1200 if (brp_i >= aarch64->brp_num) {
1201 LOG_ERROR("cannot find a free Breakpoint Register Pair");
1202 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1203 }
1204 breakpoint->set = brp_i + 1;
1205 if (breakpoint->length == 2)
1206 byte_addr_select = (3 << (breakpoint->address & 0x02));
1207 control = ((matchmode & 0x7) << 20)
1208 | (1 << 13)
1209 | (byte_addr_select << 5)
1210 | (3 << 1) | 1;
1211 brp_list[brp_i].used = 1;
1212 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1213 brp_list[brp_i].control = control;
1214 bpt_value = brp_list[brp_i].value;
1215
1216 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1217 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1218 (uint32_t)(bpt_value & 0xFFFFFFFF));
1219 if (retval != ERROR_OK)
1220 return retval;
1221 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1222 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1223 (uint32_t)(bpt_value >> 32));
1224 if (retval != ERROR_OK)
1225 return retval;
1226
1227 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1228 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1229 brp_list[brp_i].control);
1230 if (retval != ERROR_OK)
1231 return retval;
1232 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1233 brp_list[brp_i].control,
1234 brp_list[brp_i].value);
1235
1236 } else if (breakpoint->type == BKPT_SOFT) {
1237 uint8_t code[4];
1238
1239 buf_set_u32(code, 0, 32, armv8_opcode(armv8, ARMV8_OPC_HLT));
1240 retval = target_read_memory(target,
1241 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1242 breakpoint->length, 1,
1243 breakpoint->orig_instr);
1244 if (retval != ERROR_OK)
1245 return retval;
1246
1247 armv8_cache_d_inner_flush_virt(armv8,
1248 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1249 breakpoint->length);
1250
1251 retval = target_write_memory(target,
1252 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1253 breakpoint->length, 1, code);
1254 if (retval != ERROR_OK)
1255 return retval;
1256
1257 armv8_cache_d_inner_flush_virt(armv8,
1258 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1259 breakpoint->length);
1260
1261 armv8_cache_i_inner_inval_virt(armv8,
1262 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1263 breakpoint->length);
1264
1265 breakpoint->set = 0x11; /* Any nice value but 0 */
1266 }
1267
1268 /* Ensure that halting debug mode is enabled */
1269 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1270 if (retval != ERROR_OK) {
1271 LOG_DEBUG("Failed to set DSCR.HDE");
1272 return retval;
1273 }
1274
1275 return ERROR_OK;
1276 }
1277
1278 static int aarch64_set_context_breakpoint(struct target *target,
1279 struct breakpoint *breakpoint, uint8_t matchmode)
1280 {
1281 int retval = ERROR_FAIL;
1282 int brp_i = 0;
1283 uint32_t control;
1284 uint8_t byte_addr_select = 0x0F;
1285 struct aarch64_common *aarch64 = target_to_aarch64(target);
1286 struct armv8_common *armv8 = &aarch64->armv8_common;
1287 struct aarch64_brp *brp_list = aarch64->brp_list;
1288
1289 if (breakpoint->set) {
1290 LOG_WARNING("breakpoint already set");
1291 return retval;
1292 }
1293 /* check available context BRPs */
1294 while ((brp_i < aarch64->brp_num) && (brp_list[brp_i].used ||
1295 (brp_list[brp_i].type != BRP_CONTEXT)))
1296 brp_i++;
1297
1298 if (brp_i >= aarch64->brp_num) {
1299 LOG_ERROR("cannot find a free Breakpoint Register Pair");
1300 return ERROR_FAIL;
1301 }
1302
1303 breakpoint->set = brp_i + 1;
1304 control = ((matchmode & 0x7) << 20)
1305 | (1 << 13)
1306 | (byte_addr_select << 5)
1307 | (3 << 1) | 1;
1308 brp_list[brp_i].used = 1;
1309 brp_list[brp_i].value = (breakpoint->asid);
1310 brp_list[brp_i].control = control;
1311 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1312 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1313 brp_list[brp_i].value);
1314 if (retval != ERROR_OK)
1315 return retval;
1316 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1317 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1318 brp_list[brp_i].control);
1319 if (retval != ERROR_OK)
1320 return retval;
1321 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1322 brp_list[brp_i].control,
1323 brp_list[brp_i].value);
1324 return ERROR_OK;
1325
1326 }
1327
1328 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1329 {
1330 int retval = ERROR_FAIL;
1331 int brp_1 = 0; /* holds the contextID pair */
1332 int brp_2 = 0; /* holds the IVA pair */
1333 uint32_t control_CTX, control_IVA;
1334 uint8_t CTX_byte_addr_select = 0x0F;
1335 uint8_t IVA_byte_addr_select = 0x0F;
1336 uint8_t CTX_machmode = 0x03;
1337 uint8_t IVA_machmode = 0x01;
1338 struct aarch64_common *aarch64 = target_to_aarch64(target);
1339 struct armv8_common *armv8 = &aarch64->armv8_common;
1340 struct aarch64_brp *brp_list = aarch64->brp_list;
1341
1342 if (breakpoint->set) {
1343 LOG_WARNING("breakpoint already set");
1344 return retval;
1345 }
1346 /* check available context BRPs */
1347 while ((brp_1 < aarch64->brp_num) && (brp_list[brp_1].used ||
1348 (brp_list[brp_1].type != BRP_CONTEXT)))
1349 brp_1++;
1350
1351 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1352 if (brp_1 >= aarch64->brp_num) {
1353 LOG_ERROR("cannot find a free Breakpoint Register Pair");
1354 return ERROR_FAIL;
1355 }
1356
1357 while ((brp_2 < aarch64->brp_num) && (brp_list[brp_2].used ||
1358 (brp_list[brp_2].type != BRP_NORMAL)))
1359 brp_2++;
1360
1361 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1362 if (brp_2 >= aarch64->brp_num) {
1363 LOG_ERROR("cannot find a free Breakpoint Register Pair");
1364 return ERROR_FAIL;
1365 }
1366
1367 breakpoint->set = brp_1 + 1;
1368 breakpoint->linked_BRP = brp_2;
1369 control_CTX = ((CTX_machmode & 0x7) << 20)
1370 | (brp_2 << 16)
1371 | (0 << 14)
1372 | (CTX_byte_addr_select << 5)
1373 | (3 << 1) | 1;
1374 brp_list[brp_1].used = 1;
1375 brp_list[brp_1].value = (breakpoint->asid);
1376 brp_list[brp_1].control = control_CTX;
1377 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1378 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1379 brp_list[brp_1].value);
1380 if (retval != ERROR_OK)
1381 return retval;
1382 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1383 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1384 brp_list[brp_1].control);
1385 if (retval != ERROR_OK)
1386 return retval;
1387
1388 control_IVA = ((IVA_machmode & 0x7) << 20)
1389 | (brp_1 << 16)
1390 | (1 << 13)
1391 | (IVA_byte_addr_select << 5)
1392 | (3 << 1) | 1;
1393 brp_list[brp_2].used = 1;
1394 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1395 brp_list[brp_2].control = control_IVA;
1396 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1397 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1398 brp_list[brp_2].value & 0xFFFFFFFF);
1399 if (retval != ERROR_OK)
1400 return retval;
1401 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1402 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1403 brp_list[brp_2].value >> 32);
1404 if (retval != ERROR_OK)
1405 return retval;
1406 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1407 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1408 brp_list[brp_2].control);
1409 if (retval != ERROR_OK)
1410 return retval;
1411
1412 return ERROR_OK;
1413 }
1414
1415 static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1416 {
1417 int retval;
1418 struct aarch64_common *aarch64 = target_to_aarch64(target);
1419 struct armv8_common *armv8 = &aarch64->armv8_common;
1420 struct aarch64_brp *brp_list = aarch64->brp_list;
1421
1422 if (!breakpoint->set) {
1423 LOG_WARNING("breakpoint not set");
1424 return ERROR_OK;
1425 }
1426
1427 if (breakpoint->type == BKPT_HARD) {
1428 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1429 int brp_i = breakpoint->set - 1;
1430 int brp_j = breakpoint->linked_BRP;
1431 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1432 LOG_DEBUG("Invalid BRP number in breakpoint");
1433 return ERROR_OK;
1434 }
1435 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1436 brp_list[brp_i].control, brp_list[brp_i].value);
1437 brp_list[brp_i].used = 0;
1438 brp_list[brp_i].value = 0;
1439 brp_list[brp_i].control = 0;
1440 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1441 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1442 brp_list[brp_i].control);
1443 if (retval != ERROR_OK)
1444 return retval;
1445 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1446 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1447 (uint32_t)brp_list[brp_i].value);
1448 if (retval != ERROR_OK)
1449 return retval;
1450 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1451 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1452 (uint32_t)brp_list[brp_i].value);
1453 if (retval != ERROR_OK)
1454 return retval;
1455 if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
1456 LOG_DEBUG("Invalid BRP number in breakpoint");
1457 return ERROR_OK;
1458 }
1459 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
1460 brp_list[brp_j].control, brp_list[brp_j].value);
1461 brp_list[brp_j].used = 0;
1462 brp_list[brp_j].value = 0;
1463 brp_list[brp_j].control = 0;
1464 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1465 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
1466 brp_list[brp_j].control);
1467 if (retval != ERROR_OK)
1468 return retval;
1469 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1470 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
1471 (uint32_t)brp_list[brp_j].value);
1472 if (retval != ERROR_OK)
1473 return retval;
1474 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1475 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
1476 (uint32_t)brp_list[brp_j].value);
1477 if (retval != ERROR_OK)
1478 return retval;
1479
1480 breakpoint->linked_BRP = 0;
1481 breakpoint->set = 0;
1482 return ERROR_OK;
1483
1484 } else {
1485 int brp_i = breakpoint->set - 1;
1486 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1487 LOG_DEBUG("Invalid BRP number in breakpoint");
1488 return ERROR_OK;
1489 }
1490 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
1491 brp_list[brp_i].control, brp_list[brp_i].value);
1492 brp_list[brp_i].used = 0;
1493 brp_list[brp_i].value = 0;
1494 brp_list[brp_i].control = 0;
1495 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1496 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1497 brp_list[brp_i].control);
1498 if (retval != ERROR_OK)
1499 return retval;
1500 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1501 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1502 brp_list[brp_i].value);
1503 if (retval != ERROR_OK)
1504 return retval;
1505
1506 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1507 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1508 (uint32_t)brp_list[brp_i].value);
1509 if (retval != ERROR_OK)
1510 return retval;
1511 breakpoint->set = 0;
1512 return ERROR_OK;
1513 }
1514 } else {
1515 /* restore original instruction (kept in target endianness) */
1516
1517 armv8_cache_d_inner_flush_virt(armv8,
1518 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1519 breakpoint->length);
1520
1521 if (breakpoint->length == 4) {
1522 retval = target_write_memory(target,
1523 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1524 4, 1, breakpoint->orig_instr);
1525 if (retval != ERROR_OK)
1526 return retval;
1527 } else {
1528 retval = target_write_memory(target,
1529 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1530 2, 1, breakpoint->orig_instr);
1531 if (retval != ERROR_OK)
1532 return retval;
1533 }
1534
1535 armv8_cache_d_inner_flush_virt(armv8,
1536 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1537 breakpoint->length);
1538
1539 armv8_cache_i_inner_inval_virt(armv8,
1540 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1541 breakpoint->length);
1542 }
1543 breakpoint->set = 0;
1544
1545 return ERROR_OK;
1546 }
1547
1548 static int aarch64_add_breakpoint(struct target *target,
1549 struct breakpoint *breakpoint)
1550 {
1551 struct aarch64_common *aarch64 = target_to_aarch64(target);
1552
1553 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1554 LOG_INFO("no hardware breakpoint available");
1555 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1556 }
1557
1558 if (breakpoint->type == BKPT_HARD)
1559 aarch64->brp_num_available--;
1560
1561 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1562 }
1563
1564 static int aarch64_add_context_breakpoint(struct target *target,
1565 struct breakpoint *breakpoint)
1566 {
1567 struct aarch64_common *aarch64 = target_to_aarch64(target);
1568
1569 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1570 LOG_INFO("no hardware breakpoint available");
1571 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1572 }
1573
1574 if (breakpoint->type == BKPT_HARD)
1575 aarch64->brp_num_available--;
1576
1577 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1578 }
1579
1580 static int aarch64_add_hybrid_breakpoint(struct target *target,
1581 struct breakpoint *breakpoint)
1582 {
1583 struct aarch64_common *aarch64 = target_to_aarch64(target);
1584
1585 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1586 LOG_INFO("no hardware breakpoint available");
1587 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1588 }
1589
1590 if (breakpoint->type == BKPT_HARD)
1591 aarch64->brp_num_available--;
1592
1593 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1594 }
1595
1596
1597 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1598 {
1599 struct aarch64_common *aarch64 = target_to_aarch64(target);
1600
1601 #if 0
1602 /* It is perfectly possible to remove breakpoints while the target is running */
1603 if (target->state != TARGET_HALTED) {
1604 LOG_WARNING("target not halted");
1605 return ERROR_TARGET_NOT_HALTED;
1606 }
1607 #endif
1608
1609 if (breakpoint->set) {
1610 aarch64_unset_breakpoint(target, breakpoint);
1611 if (breakpoint->type == BKPT_HARD)
1612 aarch64->brp_num_available++;
1613 }
1614
1615 return ERROR_OK;
1616 }
1617
1618 /*
1619 * AArch64 reset functions
1620 */
1621
1622 static int aarch64_assert_reset(struct target *target)
1623 {
1624 struct armv8_common *armv8 = target_to_armv8(target);
1625
1626 LOG_DEBUG(" ");
1627
1628 /* FIXME when halt is requested, make it work somehow... */
1629
1630 /* Issue some kind of warm reset. */
1631 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1632 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1633 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1634 /* REVISIT handle "pulls" cases, if there's
1635 * hardware that needs them to work.
1636 */
1637 jtag_add_reset(0, 1);
1638 } else {
1639 LOG_ERROR("%s: how to reset?", target_name(target));
1640 return ERROR_FAIL;
1641 }
1642
1643 /* registers are now invalid */
1644 if (target_was_examined(target)) {
1645 register_cache_invalidate(armv8->arm.core_cache);
1646 register_cache_invalidate(armv8->arm.core_cache->next);
1647 }
1648
1649 target->state = TARGET_RESET;
1650
1651 return ERROR_OK;
1652 }
1653
1654 static int aarch64_deassert_reset(struct target *target)
1655 {
1656 int retval;
1657
1658 LOG_DEBUG(" ");
1659
1660 /* be certain SRST is off */
1661 jtag_add_reset(0, 0);
1662
1663 if (!target_was_examined(target))
1664 return ERROR_OK;
1665
1666 retval = aarch64_poll(target);
1667 if (retval != ERROR_OK)
1668 return retval;
1669
1670 if (target->reset_halt) {
1671 if (target->state != TARGET_HALTED) {
1672 LOG_WARNING("%s: ran after reset and before halt ...",
1673 target_name(target));
1674 retval = target_halt(target);
1675 if (retval != ERROR_OK)
1676 return retval;
1677 }
1678 }
1679
1680 return aarch64_init_debug_access(target);
1681 }
1682
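/* slow element-wise write path for byte, halfword and word accesses: each element is passed through DTRRX into W1 and stored with a store opcode that advances the target address; clobbers R1/X1 */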
1683 static int aarch64_write_cpu_memory_slow(struct target *target,
1684 uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1685 {
1686 struct armv8_common *armv8 = target_to_armv8(target);
1687 struct arm_dpm *dpm = &armv8->dpm;
1688 struct arm *arm = &armv8->arm;
1689 int retval;
1690
1691 armv8_reg_current(arm, 1)->dirty = true;
1692
1693 /* change DCC to normal mode if necessary */
1694 if (*dscr & DSCR_MA) {
1695 *dscr &= ~DSCR_MA;
1696 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1697 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1698 if (retval != ERROR_OK)
1699 return retval;
1700 }
1701
1702 while (count) {
1703 uint32_t data, opcode;
1704
1705 /* write the data to store into DTRRX */
1706 if (size == 1)
1707 data = *buffer;
1708 else if (size == 2)
1709 data = target_buffer_get_u16(target, buffer);
1710 else
1711 data = target_buffer_get_u32(target, buffer);
1712 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1713 armv8->debug_base + CPUV8_DBG_DTRRX, data);
1714 if (retval != ERROR_OK)
1715 return retval;
1716
1717 if (arm->core_state == ARM_STATE_AARCH64)
1718 retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
1719 else
1720 retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
1721 if (retval != ERROR_OK)
1722 return retval;
1723
1724 if (size == 1)
1725 opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
1726 else if (size == 2)
1727 opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
1728 else
1729 opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
1730 retval = dpm->instr_execute(dpm, opcode);
1731 if (retval != ERROR_OK)
1732 return retval;
1733
1734 /* Advance */
1735 buffer += size;
1736 --count;
1737 }
1738
1739 return ERROR_OK;
1740 }
1741
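/* fast word-wise write path: with the DCC in memory-access mode, every word written to DTRRX is stored at [X0] and X0 is advanced by 4 automatically */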
1742 static int aarch64_write_cpu_memory_fast(struct target *target,
1743 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1744 {
1745 struct armv8_common *armv8 = target_to_armv8(target);
1746 struct arm *arm = &armv8->arm;
1747 int retval;
1748
1749 armv8_reg_current(arm, 1)->dirty = true;
1750
1751 /* Step 1.d - Change DCC to memory mode */
1752 *dscr |= DSCR_MA;
1753 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1754 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1755 if (retval != ERROR_OK)
1756 return retval;
1757
1758
1759 /* Step 2.a - Do the write */
1760 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1761 buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
1762 if (retval != ERROR_OK)
1763 return retval;
1764
1765 /* Step 3.a - Switch DTR mode back to Normal mode */
1766 *dscr &= ~DSCR_MA;
1767 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1768 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1769 if (retval != ERROR_OK)
1770 return retval;
1771
1772 return ERROR_OK;
1773 }
1774
1775 static int aarch64_write_cpu_memory(struct target *target,
1776 uint64_t address, uint32_t size,
1777 uint32_t count, const uint8_t *buffer)
1778 {
1779 /* write memory through APB-AP */
1780 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1781 struct armv8_common *armv8 = target_to_armv8(target);
1782 struct arm_dpm *dpm = &armv8->dpm;
1783 struct arm *arm = &armv8->arm;
1784 uint32_t dscr;
1785
1786 if (target->state != TARGET_HALTED) {
1787 LOG_WARNING("target not halted");
1788 return ERROR_TARGET_NOT_HALTED;
1789 }
1790
1791 /* Mark register X0 as dirty, as it will be used
1792 * for transferring the data.
1793 * It will be restored automatically when exiting
1794 * debug mode
1795 */
1796 armv8_reg_current(arm, 0)->dirty = true;
1797
1798 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1799
1800 /* Read DSCR */
1801 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1802 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1803 if (retval != ERROR_OK)
1804 return retval;
1805
1806 /* Set Normal access mode */
1807 dscr = (dscr & ~DSCR_MA);
1808 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1809 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
if (retval != ERROR_OK)
return retval;
1810
1811 if (arm->core_state == ARM_STATE_AARCH64) {
1812 /* Write X0 with value 'address' using write procedure */
1813 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1814 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1815 retval = dpm->instr_write_data_dcc_64(dpm,
1816 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
1817 } else {
1818 /* Write R0 with value 'address' using write procedure */
1819 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1820 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1821 retval = dpm->instr_write_data_dcc(dpm,
1822 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
1823 }
if (retval != ERROR_OK)
return retval;
1824
1825 if (size == 4 && (address % 4) == 0)
1826 retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
1827 else
1828 retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);
1829
1830 if (retval != ERROR_OK) {
1831 /* Unset DTR mode */
1832 mem_ap_read_atomic_u32(armv8->debug_ap,
1833 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1834 dscr &= ~DSCR_MA;
1835 mem_ap_write_atomic_u32(armv8->debug_ap,
1836 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1837 }
1838
1839 /* Check for sticky abort flags in the DSCR */
1840 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1841 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1842 if (retval != ERROR_OK)
1843 return retval;
1844
1845 dpm->dscr = dscr;
1846 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1847 /* Abort occurred - clear it and exit */
1848 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1849 armv8_dpm_handle_exception(dpm);
1850 return ERROR_FAIL;
1851 }
1852
1853 /* Done */
1854 return ERROR_OK;
1855 }
1856
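/* Slow path for unaligned or sub-word reads: execute one LDRB/LDRH/LDRW
 * per item on the core, move the loaded value to the DTR with an MSR/MCR
 * and collect it from DBGDTRTX, clobbering X1 (marked dirty below). */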
1857 static int aarch64_read_cpu_memory_slow(struct target *target,
1858 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
1859 {
1860 struct armv8_common *armv8 = target_to_armv8(target);
1861 struct arm_dpm *dpm = &armv8->dpm;
1862 struct arm *arm = &armv8->arm;
1863 int retval;
1864
1865 armv8_reg_current(arm, 1)->dirty = true;
1866
1867 /* change DCC to normal mode (if necessary) */
1868 if (*dscr & DSCR_MA) {
1869 *dscr &= ~DSCR_MA;
1870 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1871 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1872 if (retval != ERROR_OK)
1873 return retval;
1874 }
1875
1876 while (count) {
1877 uint32_t opcode, data;
1878
1879 if (size == 1)
1880 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
1881 else if (size == 2)
1882 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
1883 else
1884 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
1885 retval = dpm->instr_execute(dpm, opcode);
1886 if (retval != ERROR_OK)
1887 return retval;
1888
1889 if (arm->core_state == ARM_STATE_AARCH64)
1890 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
1891 else
1892 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1893 if (retval != ERROR_OK)
1894 return retval;
1895
1896 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1897 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
1898 if (retval != ERROR_OK)
1899 return retval;
1900
1901 if (size == 1)
1902 *buffer = (uint8_t)data;
1903 else if (size == 2)
1904 target_buffer_set_u16(target, buffer, (uint16_t)data);
1905 else
1906 target_buffer_set_u32(target, buffer, data);
1907
1908 /* Advance */
1909 buffer += size;
1910 --count;
1911 }
1912
1913 return ERROR_OK;
1914 }
1915
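/* Fast path for 4-byte aligned reads using DCC memory-access mode: with
 * EDSCR.MA set, each external read of DBGDTRTX makes the core reissue
 * (roughly) "ldr w1, [x0], #4; msr dbgdtrtx_el0, x1", so a no-increment
 * buffer read of count-1 words streams the data; the final word is
 * collected only after leaving memory-access mode so that no load beyond
 * the requested range is triggered. */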
1916 static int aarch64_read_cpu_memory_fast(struct target *target,
1917 uint32_t count, uint8_t *buffer, uint32_t *dscr)
1918 {
1919 struct armv8_common *armv8 = target_to_armv8(target);
1920 struct arm_dpm *dpm = &armv8->dpm;
1921 struct arm *arm = &armv8->arm;
1922 int retval;
1923 uint32_t value;
1924
1925 /* Mark X1 as dirty */
1926 armv8_reg_current(arm, 1)->dirty = true;
1927
1928 if (arm->core_state == ARM_STATE_AARCH64) {
1929 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1930 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
1931 } else {
1932 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1933 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1934 }
if (retval != ERROR_OK)
return retval;
1935
1936 /* Step 1.e - Change DCC to memory mode */
1937 *dscr |= DSCR_MA;
1938 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1939 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
if (retval != ERROR_OK)
return retval;
1940 /* Step 1.f - read DBGDTRTX and discard the value */
1941 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1942 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
if (retval != ERROR_OK)
return retval;
1943
1944 count--;
1945 /* Read the data - each read of the DTRTX register causes the load
1946 * instruction to be reissued. Abort flags are sticky, so they can be
1947 * checked once at the end of the transaction.
1948 *
1949 * The data is transferred in 32-bit aligned words. */
1950
1951 if (count) {
1952 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1953 * increments X0 by 4. */
1954 retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
1955 armv8->debug_base + CPUV8_DBG_DTRTX);
1956 if (retval != ERROR_OK)
1957 return retval;
1958 }
1959
1960 /* Step 3.a - set DTR access mode back to Normal mode */
1961 *dscr &= ~DSCR_MA;
1962 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1963 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1964 if (retval != ERROR_OK)
1965 return retval;
1966
1967 /* Step 3.b - read DBGDTRTX for the final value */
1968 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1969 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1970 if (retval != ERROR_OK)
1971 return retval;
1972
1973 target_buffer_set_u32(target, buffer + count * 4, value);
1974 return retval;
1975 }
1976
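/* Top-level CPU memory read: force the DCC to Normal mode, load the start
 * address into X0/R0 over the DCC, dispatch to the fast or slow transfer
 * loop and finally check DSCR for sticky abort flags. */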
1977 static int aarch64_read_cpu_memory(struct target *target,
1978 target_addr_t address, uint32_t size,
1979 uint32_t count, uint8_t *buffer)
1980 {
1981 /* read memory through APB-AP */
1982 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1983 struct armv8_common *armv8 = target_to_armv8(target);
1984 struct arm_dpm *dpm = &armv8->dpm;
1985 struct arm *arm = &armv8->arm;
1986 uint32_t dscr;
1987
1988 LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
1989 address, size, count);
1990
1991 if (target->state != TARGET_HALTED) {
1992 LOG_WARNING("target not halted");
1993 return ERROR_TARGET_NOT_HALTED;
1994 }
1995
1996 /* Mark register X0 as dirty, as it will be used
1997 * for transferring the data.
1998 * It will be restored automatically when exiting
1999 * debug mode
2000 */
2001 armv8_reg_current(arm, 0)->dirty = true;
2002
2003 /* Read DSCR */
2004 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2005 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2006
2007 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2008
2009 /* Set Normal access mode */
2010 dscr &= ~DSCR_MA;
2011 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
2012 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2013
2014 if (arm->core_state == ARM_STATE_AARCH64) {
2015 /* Write X0 with value 'address' using write procedure */
2016 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2017 /* Step 1.c - Copy value from DTR to X0 using instruction mrs x0, DBGDTR_EL0 */
2018 retval += dpm->instr_write_data_dcc_64(dpm,
2019 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
2020 } else {
2021 /* Write R0 with value 'address' using write procedure */
2022 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2023 /* Step 1.c - Copy value from DTR to R0 using instruction mrc p14, 0, r0, c0, c5, 0 (DBGDTRRXint) */
2024 retval += dpm->instr_write_data_dcc(dpm,
2025 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
2026 }
if (retval != ERROR_OK)
return retval;
2027
2028 if (size == 4 && (address % 4) == 0)
2029 retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
2030 else
2031 retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2032
2033 if (dscr & DSCR_MA) {
2034 dscr &= ~DSCR_MA;
2035 mem_ap_write_atomic_u32(armv8->debug_ap,
2036 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2037 }
2038
2039 if (retval != ERROR_OK)
2040 return retval;
2041
2042 /* Check for sticky abort flags in the DSCR */
2043 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2044 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2045 if (retval != ERROR_OK)
2046 return retval;
2047
2048 dpm->dscr = dscr;
2049
2050 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2051 /* Abort occurred - clear it and exit */
2052 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2053 armv8_dpm_handle_exception(dpm);
2054 return ERROR_FAIL;
2055 }
2056
2057 /* Done */
2058 return ERROR_OK;
2059 }
2060
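/* Physical memory accesses reuse the normal CPU memory path with address
 * translation temporarily disabled via aarch64_mmu_modify(). */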
2061 static int aarch64_read_phys_memory(struct target *target,
2062 target_addr_t address, uint32_t size,
2063 uint32_t count, uint8_t *buffer)
2064 {
2065 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2066
2067 if (count && buffer) {
2068 /* read memory through APB-AP */
2069 retval = aarch64_mmu_modify(target, 0);
2070 if (retval != ERROR_OK)
2071 return retval;
2072 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2073 }
2074 return retval;
2075 }
2076
2077 static int aarch64_read_memory(struct target *target, target_addr_t address,
2078 uint32_t size, uint32_t count, uint8_t *buffer)
2079 {
2080 int mmu_enabled = 0;
2081 int retval;
2082
2083 /* determine if MMU was enabled on target stop */
2084 retval = aarch64_mmu(target, &mmu_enabled);
2085 if (retval != ERROR_OK)
2086 return retval;
2087
2088 if (mmu_enabled) {
2089 /* enable MMU as we could have disabled it for phys access */
2090 retval = aarch64_mmu_modify(target, 1);
2091 if (retval != ERROR_OK)
2092 return retval;
2093 }
2094 return aarch64_read_cpu_memory(target, address, size, count, buffer);
2095 }
2096
2097 static int aarch64_write_phys_memory(struct target *target,
2098 target_addr_t address, uint32_t size,
2099 uint32_t count, const uint8_t *buffer)
2100 {
2101 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2102
2103 if (count && buffer) {
2104 /* write memory through APB-AP */
2105 retval = aarch64_mmu_modify(target, 0);
2106 if (retval != ERROR_OK)
2107 return retval;
2108 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2109 }
2110
2111 return retval;
2112 }
2113
2114 static int aarch64_write_memory(struct target *target, target_addr_t address,
2115 uint32_t size, uint32_t count, const uint8_t *buffer)
2116 {
2117 int mmu_enabled = 0;
2118 int retval;
2119
2120 /* determine if MMU was enabled on target stop */
2121 retval = aarch64_mmu(target, &mmu_enabled);
2122 if (retval != ERROR_OK)
2123 return retval;
2124
2125 if (mmu_enabled) {
2126 /* enable MMU as we could have disabled it for phys access */
2127 retval = aarch64_mmu_modify(target, 1);
2128 if (retval != ERROR_OK)
2129 return retval;
2130 }
2131 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2132 }
2133
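/* Timer callback: while the target is running, drain debug messages that
 * firmware pushes through the DCC (DSCR TXfull / DTRTX) and forward them
 * to the generic target_request layer. */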
2134 static int aarch64_handle_target_request(void *priv)
2135 {
2136 struct target *target = priv;
2137 struct armv8_common *armv8 = target_to_armv8(target);
2138 int retval;
2139
2140 if (!target_was_examined(target))
2141 return ERROR_OK;
2142 if (!target->dbg_msg_enabled)
2143 return ERROR_OK;
2144
2145 if (target->state == TARGET_RUNNING) {
2146 uint32_t request;
2147 uint32_t dscr;
2148 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2149 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2150
2151 /* check if we have data */
2152 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2153 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2154 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2155 if (retval == ERROR_OK) {
2156 target_request(target, request);
2157 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2158 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2159 }
2160 }
2161 }
2162
2163 return ERROR_OK;
2164 }
2165
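/* One-time examination: initialize the DAP, find the APB-AP, locate the
 * core debug registers via the ROM table (unless dbgbase was given),
 * clear the OS lock, read the ID registers, create the CTI and DPM and
 * set up the hardware breakpoint bookkeeping. */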
2166 static int aarch64_examine_first(struct target *target)
2167 {
2168 struct aarch64_common *aarch64 = target_to_aarch64(target);
2169 struct armv8_common *armv8 = &aarch64->armv8_common;
2170 struct adiv5_dap *swjdp = armv8->arm.dap;
2171 uint32_t cti_base;
2172 int i;
2173 int retval = ERROR_OK;
2174 uint64_t debug, ttypr;
2175 uint32_t cpuid;
2176 uint32_t tmp0, tmp1, tmp2, tmp3;
2177 debug = ttypr = cpuid = 0;
2178
2179 retval = dap_dp_init(swjdp);
2180 if (retval != ERROR_OK)
2181 return retval;
2182
2183 /* Search for the APB-AP - it is needed for access to the debug registers */
2184 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2185 if (retval != ERROR_OK) {
2186 LOG_ERROR("Could not find APB-AP for debug access");
2187 return retval;
2188 }
2189
2190 retval = mem_ap_init(armv8->debug_ap);
2191 if (retval != ERROR_OK) {
2192 LOG_ERROR("Could not initialize the APB-AP");
2193 return retval;
2194 }
2195
2196 armv8->debug_ap->memaccess_tck = 10;
2197
2198 if (!target->dbgbase_set) {
2199 uint32_t dbgbase;
2200 /* Get ROM Table base */
2201 uint32_t apid;
2202 int32_t coreidx = target->coreid;
2203 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2204 if (retval != ERROR_OK)
2205 return retval;
2206 /* Lookup CoreSight device type 0x15 - core debug interface */
2207 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2208 &armv8->debug_base, &coreidx);
2209 if (retval != ERROR_OK)
2210 return retval;
2211 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
2212 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2213 } else
2214 armv8->debug_base = target->dbgbase;
2215
2216 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2217 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2218 if (retval != ERROR_OK) {
2219 LOG_DEBUG("Examine %s failed", "oslock");
2220 return retval;
2221 }
2222
2223 retval = mem_ap_read_u32(armv8->debug_ap,
2224 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2225 if (retval != ERROR_OK) {
2226 LOG_DEBUG("Examine %s failed", "CPUID");
2227 return retval;
2228 }
2229
2230 retval = mem_ap_read_u32(armv8->debug_ap,
2231 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2232 retval += mem_ap_read_u32(armv8->debug_ap,
2233 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2234 if (retval != ERROR_OK) {
2235 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2236 return retval;
2237 }
2238 retval = mem_ap_read_u32(armv8->debug_ap,
2239 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2240 retval += mem_ap_read_u32(armv8->debug_ap,
2241 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2242 if (retval != ERROR_OK) {
2243 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2244 return retval;
2245 }
2246
2247 retval = dap_run(armv8->debug_ap->dap);
2248 if (retval != ERROR_OK) {
2249 LOG_ERROR("%s: examination failed", target_name(target));
2250 return retval;
2251 }
2252
2253 /* assemble the 64-bit ID register values from their 32-bit halves */
2254 ttypr = ((uint64_t)tmp1 << 32) | tmp0;
2255 debug = ((uint64_t)tmp3 << 32) | tmp2;
2257
2258 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2259 LOG_DEBUG("ttypr = 0x%016" PRIx64, ttypr);
2260 LOG_DEBUG("debug = 0x%016" PRIx64, debug);
2261
2262 if (target->ctibase == 0) {
2263 /* assume a v8 rom table layout */
2264 cti_base = armv8->debug_base + 0x10000;
2265 LOG_INFO("Target ctibase is not set, assuming 0x%08" PRIx32, cti_base);
2266 } else
2267 cti_base = target->ctibase;
2268
2269 armv8->cti = arm_cti_create(armv8->debug_ap, cti_base);
2270 if (armv8->cti == NULL)
2271 return ERROR_FAIL;
2272
2273 retval = aarch64_dpm_setup(aarch64, debug);
2274 if (retval != ERROR_OK)
2275 return retval;
2276
2277 /* Setup Breakpoint Register Pairs */
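/* Per the v8-A debug architecture, ID_AA64DFR0_EL1.BRPs (bits 15:12) and
 * CTX_CMPs (bits 31:28) hold the implemented counts minus one, and the
 * pairs capable of context matching are the highest-numbered ones. */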
2278 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2279 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2280 aarch64->brp_num_available = aarch64->brp_num;
2281 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
if (!aarch64->brp_list)
return ERROR_FAIL;
2282 for (i = 0; i < aarch64->brp_num; i++) {
2283 aarch64->brp_list[i].used = 0;
2284 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2285 aarch64->brp_list[i].type = BRP_NORMAL;
2286 else
2287 aarch64->brp_list[i].type = BRP_CONTEXT;
2288 aarch64->brp_list[i].value = 0;
2289 aarch64->brp_list[i].control = 0;
2290 aarch64->brp_list[i].BRPn = i;
2291 }
2292
2293 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2294
2295 target->state = TARGET_UNKNOWN;
2296 target->debug_reason = DBG_REASON_NOTHALTED;
2297 aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2298 target_set_examined(target);
2299 return ERROR_OK;
2300 }
2301
2302 static int aarch64_examine(struct target *target)
2303 {
2304 int retval = ERROR_OK;
2305
2306 /* don't re-probe hardware after each reset */
2307 if (!target_was_examined(target))
2308 retval = aarch64_examine_first(target);
2309
2310 /* Configure core debug access */
2311 if (retval == ERROR_OK)
2312 retval = aarch64_init_debug_access(target);
2313
2314 return retval;
2315 }
2316
2317 /*
2318 * aarch64 target creation and initialization
2319 */
2320
2321 static int aarch64_init_target(struct command_context *cmd_ctx,
2322 struct target *target)
2323 {
2324 /* most initialization is done in aarch64_examine_first() */
2325 return ERROR_OK;
2326 }
2327
2328 static int aarch64_init_arch_info(struct target *target,
2329 struct aarch64_common *aarch64, struct jtag_tap *tap)
2330 {
2331 struct armv8_common *armv8 = &aarch64->armv8_common;
2332
2333 /* Setup struct aarch64_common */
2334 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2335 /* create a DAP for the TAP if it does not have one yet */
2336 if (!tap->dap) {
2337 tap->dap = dap_init();
2338 tap->dap->tap = tap;
2339 }
2340 armv8->arm.dap = tap->dap;
2341
2342 /* register arch-specific functions */
2343 armv8->examine_debug_reason = NULL;
2344 armv8->post_debug_entry = aarch64_post_debug_entry;
2345 armv8->pre_restore_context = NULL;
2346 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2347
2348 armv8_init_arch_info(target, armv8);
2349 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2350
2351 return ERROR_OK;
2352 }
2353
2354 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2355 {
2356 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
if (!aarch64)
return ERROR_FAIL;
2357
2358 return aarch64_init_arch_info(target, aarch64, target->tap);
2359 }
2360
2361 static int aarch64_mmu(struct target *target, int *enabled)
2362 {
2363 if (target->state != TARGET_HALTED) {
2364 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2365 return ERROR_TARGET_INVALID;
2366 }
2367
2368 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2369 return ERROR_OK;
2370 }
2371
2372 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2373 target_addr_t *phys)
2374 {
2375 return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2376 }
2377
2378 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2379 {
2380 struct target *target = get_current_target(CMD_CTX);
2381 struct armv8_common *armv8 = target_to_armv8(target);
2382
2383 return armv8_handle_cache_info_command(CMD_CTX,
2384 &armv8->armv8_mmu.armv8_cache);
2385 }
2386
2387
2388 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2389 {
2390 struct target *target = get_current_target(CMD_CTX);
2391 if (!target_was_examined(target)) {
2392 LOG_ERROR("target not examined yet");
2393 return ERROR_FAIL;
2394 }
2395
2396 return aarch64_init_debug_access(target);
2397 }
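
/* The smp_off/smp_on commands clear or set the smp flag on every member
 * of this target's SMP group; smp_off also points the shared gdb service
 * back at the invoking core. */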
2398 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2399 {
2400 struct target *target = get_current_target(CMD_CTX);
2401 /* check target is an smp target */
2402 struct target_list *head;
2403 struct target *curr;
2404 head = target->head;
2405 target->smp = 0;
2406 if (head != NULL) {
2407 while (head != NULL) {
2408 curr = head->target;
2409 curr->smp = 0;
2410 head = head->next;
2411 }
2412 /* fixes the target display to the debugger */
2413 target->gdb_service->target = target;
2414 }
2415 return ERROR_OK;
2416 }
2417
2418 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2419 {
2420 struct target *target = get_current_target(CMD_CTX);
2421 struct target_list *head;
2422 struct target *curr;
2423 head = target->head;
2424 if (head != NULL) {
2425 target->smp = 1;
2426 while (head != NULL) {
2427 curr = head->target;
2428 curr->smp = 1;
2429 head = head->next;
2430 }
2431 }
2432 return ERROR_OK;
2433 }
2434
2435 COMMAND_HANDLER(aarch64_mask_interrupts_command)
2436 {
2437 struct target *target = get_current_target(CMD_CTX);
2438 struct aarch64_common *aarch64 = target_to_aarch64(target);
2439
2440 static const Jim_Nvp nvp_maskisr_modes[] = {
2441 { .name = "off", .value = AARCH64_ISRMASK_OFF },
2442 { .name = "on", .value = AARCH64_ISRMASK_ON },
2443 { .name = NULL, .value = -1 },
2444 };
2445 const Jim_Nvp *n;
2446
2447 if (CMD_ARGC > 0) {
2448 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2449 if (n->name == NULL) {
2450 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2451 return ERROR_COMMAND_SYNTAX_ERROR;
2452 }
2453
2454 aarch64->isrmasking_mode = n->value;
2455 }
2456
2457 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
2458 command_print(CMD_CTX, "aarch64 interrupt mask %s", n->name);
2459
2460 return ERROR_OK;
2461 }
2462
2463 static const struct command_registration aarch64_exec_command_handlers[] = {
2464 {
2465 .name = "cache_info",
2466 .handler = aarch64_handle_cache_info_command,
2467 .mode = COMMAND_EXEC,
2468 .help = "display information about target caches",
2469 .usage = "",
2470 },
2471 {
2472 .name = "dbginit",
2473 .handler = aarch64_handle_dbginit_command,
2474 .mode = COMMAND_EXEC,
2475 .help = "Initialize core debug",
2476 .usage = "",
2477 },
2478 { .name = "smp_off",
2479 .handler = aarch64_handle_smp_off_command,
2480 .mode = COMMAND_EXEC,
2481 .help = "Stop smp handling",
2482 .usage = "",
2483 },
2484 {
2485 .name = "smp_on",
2486 .handler = aarch64_handle_smp_on_command,
2487 .mode = COMMAND_EXEC,
2488 .help = "Restart smp handling",
2489 .usage = "",
2490 },
2491 {
2492 .name = "maskisr",
2493 .handler = aarch64_mask_interrupts_command,
2494 .mode = COMMAND_ANY,
2495 .help = "mask aarch64 interrupts during single-step",
2496 .usage = "['on'|'off']",
2497 },
2498
2499 COMMAND_REGISTRATION_DONE
2500 };
2501 static const struct command_registration aarch64_command_handlers[] = {
2502 {
2503 .chain = armv8_command_handlers,
2504 },
2505 {
2506 .name = "aarch64",
2507 .mode = COMMAND_ANY,
2508 .help = "Aarch64 command group",
2509 .usage = "",
2510 .chain = aarch64_exec_command_handlers,
2511 },
2512 COMMAND_REGISTRATION_DONE
2513 };
2514
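/* Target-type vtable hooking the aarch64 implementation into the generic
 * target layer; hardware watchpoints are not implemented yet, hence the
 * NULL add/remove_watchpoint entries. */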
2515 struct target_type aarch64_target = {
2516 .name = "aarch64",
2517
2518 .poll = aarch64_poll,
2519 .arch_state = armv8_arch_state,
2520
2521 .halt = aarch64_halt,
2522 .resume = aarch64_resume,
2523 .step = aarch64_step,
2524
2525 .assert_reset = aarch64_assert_reset,
2526 .deassert_reset = aarch64_deassert_reset,
2527
2528 /* REVISIT allow exporting the AArch64 SIMD/FP registers ... */
2529 .get_gdb_reg_list = armv8_get_gdb_reg_list,
2530
2531 .read_memory = aarch64_read_memory,
2532 .write_memory = aarch64_write_memory,
2533
2534 .add_breakpoint = aarch64_add_breakpoint,
2535 .add_context_breakpoint = aarch64_add_context_breakpoint,
2536 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2537 .remove_breakpoint = aarch64_remove_breakpoint,
2538 .add_watchpoint = NULL,
2539 .remove_watchpoint = NULL,
2540
2541 .commands = aarch64_command_handlers,
2542 .target_create = aarch64_target_create,
2543 .init_target = aarch64_init_target,
2544 .examine = aarch64_examine,
2545
2546 .read_phys_memory = aarch64_read_phys_memory,
2547 .write_phys_memory = aarch64_write_phys_memory,
2548 .mmu = aarch64_mmu,
2549 .virt2phys = aarch64_virt2phys,
2550 };