/***************************************************************************
 *   Copyright (C) 2015 by David Ung                                       *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *                                                                         *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "breakpoints.h"
#include "aarch64.h"
#include "register.h"
#include "target_request.h"
#include "target_type.h"
#include "armv8_opcodes.h"
#include "armv8_cache.h"
#include <helper/time_support.h>

enum restart_mode {
	RESTART_LAZY,
	RESTART_SYNC,
};

enum halt_mode {
	HALT_LAZY,
	HALT_SYNC,
};

struct aarch64_private_config {
	struct adiv5_private_config adiv5_config;
	struct arm_cti *cti;
};
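/*
 * Note: the halt/restart machinery below drives each PE through its
 * cross-trigger interface (CTI), which is why every target carries
 * its own arm_cti handle alongside the ADIv5 configuration.
 */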

static int aarch64_poll(struct target *target);
static int aarch64_debug_entry(struct target *target);
static int aarch64_restore_context(struct target *target, bool bpwp);
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_mmu(struct target *target, int *enabled);
static int aarch64_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int aarch64_read_cpu_memory(struct target *target,
	uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);

#define foreach_smp_target(pos, head) \
	for (pos = head; (pos != NULL); pos = pos->next)
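/*
 * Usage sketch for the iterator above, as it appears throughout this file:
 *
 *	struct target_list *head;
 *	foreach_smp_target(head, target->head) {
 *		struct target *curr = head->target;
 *		...
 *	}
 */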

static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			LOG_INFO("cannot write system control register in this mode");
			return ERROR_FAIL;
		}

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}

/* Modify system_control_reg to enable or disable the MMU for:
 * - virt2phys address conversion
 * - reading or writing memory at physical or virtual addresses */
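/* SCTLR bits touched below (per the ARMv8 ARM): bit 0 is M (MMU
 * enable), bit 2 is C (data cache enable) and bit 12 is I
 * (instruction cache enable). */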
static int aarch64_mmu_modify(struct target *target, int enable)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval = ERROR_OK;
	uint32_t instr = 0;

	if (enable) {
		/* if the MMU was disabled when the target stopped, refuse to enable it */
		if (!(aarch64->system_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable MMU on a target stopped with MMU disabled");
			return ERROR_FAIL;
		}
		if (!(aarch64->system_control_reg_curr & 0x1U))
			aarch64->system_control_reg_curr |= 0x1U;
	} else {
		if (aarch64->system_control_reg_curr & 0x4U) {
			/* data cache is active, flush it before disabling */
			aarch64->system_control_reg_curr &= ~0x4U;
			if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
				armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
		}
		if (aarch64->system_control_reg_curr & 0x1U)
			aarch64->system_control_reg_curr &= ~0x1U;
	}

	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
		instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_DEBUG("unknown cpu state 0x%" PRIx32, armv8->arm.core_mode);
		break;
	}

	retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
			aarch64->system_control_reg_curr);
	return retval;
}

/*
 * Basic debug access; very low level, assumes state is saved
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG("%s", target_name(target));

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */
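	/*
	 * With this configuration, a debugger-initiated halt is simply a
	 * pulse on channel 0 (see aarch64_halt_one()) and a restart is a
	 * pulse on channel 1 (see aarch64_do_restart_one()).
	 */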

	/* Enable CTI */
	retval = arm_cti_enable(armv8->cti, true);
	/* By default, gate all channel events to and from the CTM */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* output halt requests to PE on channel 0 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	return ERROR_OK;
}

/* Write to memory mapped registers directly with no cache or mmu handling */
static int aarch64_dap_write_memap_register_u32(struct target *target,
	uint32_t address,
	uint32_t value)
{
	int retval;
	struct armv8_common *armv8 = target_to_armv8(target);

	retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);

	return retval;
}

static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
{
	struct arm_dpm *dpm = &a8->armv8_common.dpm;
	int retval;

	dpm->arm = &a8->armv8_common.arm;
	dpm->didr = debug;

	retval = armv8_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = armv8_dpm_initialize(dpm);

	return retval;
}

static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
}

static int aarch64_check_state_one(struct target *target,
	uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	uint32_t prsr;
	int retval;

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
	if (retval != ERROR_OK)
		return retval;

	if (p_prsr)
		*p_prsr = prsr;

	if (p_result)
		*p_result = (prsr & mask) == (val & mask);

	return ERROR_OK;
}

static int aarch64_wait_halt_one(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t prsr;

	int64_t then = timeval_ms();
	for (;;) {
		int halted;

		retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
		if (retval != ERROR_OK || halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			LOG_DEBUG("target %s timeout, prsr=0x%08" PRIx32, target_name(target), prsr);
			break;
		}
	}
	return retval;
}

static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head = target->head;
	struct target *first = NULL;

	LOG_DEBUG("target %s exc %i", target_name(target), exc_target);

	while (head != NULL) {
		struct target *curr = head->target;
		struct armv8_common *armv8 = target_to_armv8(curr);
		head = head->next;

		if (exc_target && curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_RUNNING)
			continue;

		/* HACK: mark this target as prepared for halting */
		curr->debug_reason = DBG_REASON_DBGRQ;

		/* open the gate for channel 0 to let HALT requests pass to the CTM */
		retval = arm_cti_ungate_channel(armv8->cti, 0);
		if (retval == ERROR_OK)
			retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
		if (retval != ERROR_OK)
			break;

		LOG_DEBUG("target %s prepared", target_name(curr));

		if (first == NULL)
			first = curr;
	}

	if (p_first) {
		if (exc_target && first)
			*p_first = first;
		else
			*p_first = target;
	}

	return retval;
}

static int aarch64_halt_one(struct target *target, enum halt_mode mode)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG("%s", target_name(target));

	/* allow Halting Debug Mode */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = arm_cti_pulse_channel(armv8->cti, 0);
	if (retval != ERROR_OK)
		return retval;

	if (mode == HALT_SYNC) {
		retval = aarch64_wait_halt_one(target);
		if (retval != ERROR_OK) {
			if (retval == ERROR_TARGET_TIMEOUT)
				LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
			return retval;
		}
	}

	return ERROR_OK;
}

static int aarch64_halt_smp(struct target *target, bool exc_target)
{
	struct target *next = target;
	int retval;

	/* prepare halt on all PEs of the group */
	retval = aarch64_prepare_halt_smp(target, exc_target, &next);

	if (exc_target && next == target)
		return retval;

	/* halt the target PE */
	if (retval == ERROR_OK)
		retval = aarch64_halt_one(next, HALT_LAZY);

	if (retval != ERROR_OK)
		return retval;

	/* wait for all PEs to halt */
	int64_t then = timeval_ms();
	for (;;) {
		bool all_halted = true;
		struct target_list *head;
		struct target *curr;

		foreach_smp_target(head, target->head) {
			int halted;

			curr = head->target;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
			if (retval != ERROR_OK || !halted) {
				all_halted = false;
				break;
			}
		}

		if (all_halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}

		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTIs are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit halt for the second cluster.
		 */
		retval = aarch64_halt_one(curr, HALT_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}

static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;

	if (debug_reason == DBG_REASON_NOTHALTED) {
		LOG_DEBUG("Halting remaining targets in SMP group");
		aarch64_halt_smp(target, true);
	}

	/* poll all targets in the group, but skip the target that serves GDB */
	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* remember the gdb_service->target */
		if (curr->gdb_service != NULL)
			gdb_target = curr->gdb_service->target;
		/* skip it */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in aarch64_poll() */
		curr->smp = 0;
		aarch64_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target != NULL && gdb_target != target)
		aarch64_poll(gdb_target);

	return ERROR_OK;
}

/*
 * AArch64 run control
 */

static int aarch64_poll(struct target *target)
{
	enum target_state prev_target_state;
	int retval = ERROR_OK;
	int halted;

	retval = aarch64_check_state_one(target,
				PRSR_HALT, PRSR_HALT, &halted, NULL);
	if (retval != ERROR_OK)
		return retval;

	if (halted) {
		prev_target_state = target->state;
		if (prev_target_state != TARGET_HALTED) {
			enum target_debug_reason debug_reason = target->debug_reason;

			/* We have a halting debug event */
			target->state = TARGET_HALTED;
			LOG_DEBUG("Target %s halted", target_name(target));
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp)
				update_halt_gdb(target, debug_reason);

			switch (prev_target_state) {
			case TARGET_RUNNING:
			case TARGET_UNKNOWN:
			case TARGET_RESET:
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
				break;
			case TARGET_DEBUG_RUNNING:
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
				break;
			default:
				break;
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}

static int aarch64_halt(struct target *target)
{
	if (target->smp)
		return aarch64_halt_smp(target, false);

	return aarch64_halt_one(target, HALT_SYNC);
}

static int aarch64_restore_one(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	LOG_DEBUG("%s", target_name(target));

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the ARMv7 GDB Thumb fixup does not
	 * clobber the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* call this before restoring context because it uses CPU
	 * register r0 to restore the system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}

/**
 * prepare single target for restart
 */
static int aarch64_prepare_restart_one(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dscr;
	uint32_t tmp;

	LOG_DEBUG("%s", target_name(target));

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* acknowledge a pending CTI halt event */
	retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs. Also close gate for channel 0
	 * to isolate the PE from halt events.
	 */
	if (retval == ERROR_OK)
		retval = arm_cti_ungate_channel(armv8->cti, 1);
	if (retval == ERROR_OK)
		retval = arm_cti_gate_channel(armv8->cti, 0);

	/* make sure that DSCR.HDE is set */
	if (retval == ERROR_OK) {
		dscr |= DSCR_HDE;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval == ERROR_OK) {
		/* clear sticky bits in PRSR, SDR is now 0 */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
	}

	return retval;
}

static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;

	LOG_DEBUG("%s", target_name(target));

	/* trigger an event on channel 1, generates a restart request to the PE */
	retval = arm_cti_pulse_channel(armv8->cti, 1);
	if (retval != ERROR_OK)
		return retval;

	if (mode == RESTART_SYNC) {
		int64_t then = timeval_ms();
		for (;;) {
			int resumed;
			/*
			 * if PRSR.SDR is set now, the target did restart, even
			 * if it's now already halted again (e.g. due to breakpoint)
			 */
			retval = aarch64_check_state_one(target,
						PRSR_SDR, PRSR_SDR, &resumed, NULL);
			if (retval != ERROR_OK || resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: Timeout waiting for resume", target_name(target));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	return ERROR_OK;
}

static int aarch64_restart_one(struct target *target, enum restart_mode mode)
{
	int retval;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prepare_restart_one(target);
	if (retval == ERROR_OK)
		retval = aarch64_do_restart_one(target, mode);

	return retval;
}

/*
 * prepare all but the current target for restart
 */
static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;
	uint64_t address;

	foreach_smp_target(head, target->head) {
		struct target *curr = head->target;

		/* skip calling target */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_HALTED)
			continue;

		/* resume at current address, not in step mode */
		retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
		if (retval == ERROR_OK)
			retval = aarch64_prepare_restart_one(curr);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to restore target %s", target_name(curr));
			break;
		}
		/* remember the first valid target in the group */
		if (first == NULL)
			first = curr;
	}

	if (p_first)
		*p_first = first;

	return retval;
}

static int aarch64_step_restart_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prep_restart_smp(target, 0, &first);
	if (retval != ERROR_OK)
		return retval;

	if (first != NULL)
		retval = aarch64_do_restart_one(first, RESTART_LAZY);
	if (retval != ERROR_OK) {
		LOG_DEBUG("error restarting target %s", target_name(first));
		return retval;
	}

	int64_t then = timeval_ms();
	for (;;) {
		struct target *curr = target;
		bool all_resumed = true;

		foreach_smp_target(head, target->head) {
			uint32_t prsr;
			int resumed;

			curr = head->target;

			if (curr == target)
				continue;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr,
						PRSR_SDR, PRSR_SDR, &resumed, &prsr);
			if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
				all_resumed = false;
				break;
			}

			if (curr->state != TARGET_RUNNING) {
				curr->state = TARGET_RUNNING;
				curr->debug_reason = DBG_REASON_NOTHALTED;
				target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
			}
		}

		if (all_resumed)
			break;

		if (timeval_ms() > then + 1000) {
			LOG_ERROR("%s: timeout waiting for target resume", __func__);
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTIs are not connected by a common
		 * trigger matrix. It seems that we need to start one core in each
		 * cluster explicitly. So if we find that a core has not resumed
		 * yet, we trigger an explicit resume for the second cluster.
		 */
		retval = aarch64_do_restart_one(curr, RESTART_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}

static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	/*
	 * If this target is part of a SMP group, prepare the other
	 * targets for resuming. This involves restoring the complete
	 * target register context and setting up CTI gates to accept
	 * resume events from the trigger matrix.
	 */
	if (target->smp) {
		retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
		if (retval != ERROR_OK)
			return retval;
	}

	/* all targets prepared, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
				 debug_execution);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_SYNC);
	if (retval != ERROR_OK)
		return retval;

	if (target->smp) {
		int64_t then = timeval_ms();
		for (;;) {
			struct target *curr = target;
			struct target_list *head;
			bool all_resumed = true;

			foreach_smp_target(head, target->head) {
				uint32_t prsr;
				int resumed;

				curr = head->target;
				if (curr == target)
					continue;
				if (!target_was_examined(curr))
					continue;

				retval = aarch64_check_state_one(curr,
							PRSR_SDR, PRSR_SDR, &resumed, &prsr);
				if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
					all_resumed = false;
					break;
				}

				if (curr->state != TARGET_RUNNING) {
					curr->state = TARGET_RUNNING;
					curr->debug_reason = DBG_REASON_NOTHALTED;
					target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
				}
			}

			if (all_resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}

			/*
			 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
			 * and it looks like the CTIs are not connected by a common
			 * trigger matrix. It seems that we need to start one core in each
			 * cluster explicitly. So if we find that a core has not resumed
			 * yet, we trigger an explicit resume for the second cluster.
			 */
			retval = aarch64_do_restart_one(curr, RESTART_LAZY);
			if (retval != ERROR_OK)
				break;
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}

static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	uint32_t dscr;

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval == ERROR_OK)
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);

	dpm->dscr = dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* close the CTI gate for all events */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR1,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}

static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_INFO("cannot read system control register in this mode");
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
			(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
			(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	return ERROR_OK;
}

/*
 * single-step a target
 */
static int aarch64_step(struct target *target, int current, target_addr_t address,
	int handle_breakpoints)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	int saved_retval = ERROR_OK;
	int retval;
	uint32_t edecr;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
	/* make sure EDECR.SS is not set when restoring the register */

	if (retval == ERROR_OK) {
		edecr &= ~0x4;
		/* set EDECR.SS to enter hardware step mode */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
	}
	/* disable interrupts while stepping */
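	/* Bits [23:22] of DSCR/EDSCR form the interrupt-disable (INTdis)
	 * field; writing 0b11 asks the PE to mask interrupts during the
	 * step (see the ARMv8 ARM for the exact per-EL semantics). */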
	if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
		retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
	/* bail out if stepping setup has failed */
	if (retval != ERROR_OK)
		return retval;

	if (target->smp && (current == 1)) {
		/*
		 * isolate current target so that it doesn't get resumed
		 * together with the others
		 */
		retval = arm_cti_gate_channel(armv8->cti, 1);
		/* resume all other targets in the group */
		if (retval == ERROR_OK)
			retval = aarch64_step_restart_smp(target);
		if (retval != ERROR_OK) {
			LOG_ERROR("Failed to restart non-stepping targets in SMP group");
			return retval;
		}
		LOG_DEBUG("Restarted all non-stepping targets in SMP group");
	}

	/* all other targets running, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &address, 0, 0);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_LAZY);

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
	if (!handle_breakpoints)
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	int64_t then = timeval_ms();
	for (;;) {
		int stepped;
		uint32_t prsr;

		retval = aarch64_check_state_one(target,
					PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
		if (retval != ERROR_OK || stepped)
			break;

		if (timeval_ms() > then + 100) {
			LOG_ERROR("timeout waiting for target %s halt after step",
					target_name(target));
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
	}

	/*
	 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
	 * causes a timeout. The core takes the step but doesn't complete it and so
	 * debug state is never entered. However, you can manually halt the core
	 * as an external debug request is also a WFI wake-up event.
	 */
	if (retval == ERROR_TARGET_TIMEOUT)
		saved_retval = aarch64_halt_one(target, HALT_SYNC);

	/* restore EDECR */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, edecr);
	if (retval != ERROR_OK)
		return retval;

	/* restore interrupts */
	if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
		retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
		if (retval != ERROR_OK)
			return retval;
	}

	if (saved_retval != ERROR_OK)
		return saved_retval;

	return aarch64_poll(target);
}

static int aarch64_restore_context(struct target *target, bool bpwp)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;

	int retval;

	LOG_DEBUG("%s", target_name(target));

	if (armv8->pre_restore_context)
		armv8->pre_restore_context(target);

	retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
	if (retval == ERROR_OK) {
		/* registers are now invalid */
		register_cache_invalidate(arm->core_cache);
		register_cache_invalidate(arm->core_cache->next);
	}

	return retval;
}

/*
 * AArch64 breakpoint and watchpoint functions
 */

/* Setup hardware Breakpoint Register Pair */
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		int64_t bpt_value;
		while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
			brp_i++;
		if (brp_i >= aarch64->brp_num) {
			LOG_ERROR("Cannot find free Breakpoint Register Pair");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
		breakpoint->set = brp_i + 1;
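		/*
		 * DBGBCR field layout used here (sketch, per the ARMv8 ARM):
		 * bits [23:20] BT (breakpoint type, i.e. matchmode), bit 13
		 * HMC, bits [8:5] BAS (byte address select), bits [2:1] PMC
		 * and bit 0 E (enable).
		 */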
		if (breakpoint->length == 2)
			byte_addr_select = (3 << (breakpoint->address & 0x02));
		control = ((matchmode & 0x7) << 20)
			| (1 << 13)
			| (byte_addr_select << 5)
			| (3 << 1) | 1;
		brp_list[brp_i].used = 1;
		brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
		brp_list[brp_i].control = control;
		bpt_value = brp_list[brp_i].value;

		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
				(uint32_t)(bpt_value & 0xFFFFFFFF));
		if (retval != ERROR_OK)
			return retval;
		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
				(uint32_t)(bpt_value >> 32));
		if (retval != ERROR_OK)
			return retval;

		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
				brp_list[brp_i].control);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
			brp_list[brp_i].control,
			brp_list[brp_i].value);

	} else if (breakpoint->type == BKPT_SOFT) {
		uint8_t code[4];

		buf_set_u32(code, 0, 32, armv8_opcode(armv8, ARMV8_OPC_HLT));
		retval = target_read_memory(target,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length, 1,
				breakpoint->orig_instr);
		if (retval != ERROR_OK)
			return retval;

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		retval = target_write_memory(target,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length, 1, code);
		if (retval != ERROR_OK)
			return retval;

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		breakpoint->set = 0x11;	/* Any nice value but 0 */
	}

	/* Ensure that halting debug mode is enabled */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Failed to set DSCR.HDE");
		return retval;
	}

	return ERROR_OK;
}

static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval = ERROR_FAIL;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs */
	while ((brp_i < aarch64->brp_num) &&
		(brp_list[brp_i].used || (brp_list[brp_i].type != BRP_CONTEXT)))
		brp_i++;

	if (brp_i >= aarch64->brp_num) {
		LOG_ERROR("Cannot find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint->set = brp_i + 1;
	control = ((matchmode & 0x7) << 20)
		| (1 << 13)
		| (byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_i].used = 1;
	brp_list[brp_i].value = (breakpoint->asid);
	brp_list[brp_i].control = control;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
			brp_list[brp_i].value);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
			brp_list[brp_i].control);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
		brp_list[brp_i].control,
		brp_list[brp_i].value);
	return ERROR_OK;
}

static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval = ERROR_FAIL;
	int brp_1 = 0;	/* holds the contextID pair */
	int brp_2 = 0;	/* holds the IVA pair */
	uint32_t control_CTX, control_IVA;
	uint8_t CTX_byte_addr_select = 0x0F;
	uint8_t IVA_byte_addr_select = 0x0F;
	uint8_t CTX_machmode = 0x03;
	uint8_t IVA_machmode = 0x01;
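	/* The match-mode values above select DBGBCR.BT encodings (sketch,
	 * per the ARMv8 ARM): 0x3 is a linked Context ID match for the
	 * context half, 0x1 a linked address match for the IVA half. */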
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs */
	while ((brp_1 < aarch64->brp_num) &&
		(brp_list[brp_1].used || (brp_list[brp_1].type != BRP_CONTEXT)))
		brp_1++;

	LOG_DEBUG("brp(CTX) found num: %d", brp_1);
	if (brp_1 >= aarch64->brp_num) {
		LOG_ERROR("Cannot find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	while ((brp_2 < aarch64->brp_num) &&
		(brp_list[brp_2].used || (brp_list[brp_2].type != BRP_NORMAL)))
		brp_2++;

	LOG_DEBUG("brp(IVA) found num: %d", brp_2);
	if (brp_2 >= aarch64->brp_num) {
		LOG_ERROR("Cannot find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint->set = brp_1 + 1;
	breakpoint->linked_BRP = brp_2;
	control_CTX = ((CTX_machmode & 0x7) << 20)
		| (brp_2 << 16)
		| (0 << 14)
		| (CTX_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_1].used = 1;
	brp_list[brp_1].value = (breakpoint->asid);
	brp_list[brp_1].control = control_CTX;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
			brp_list[brp_1].value);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
			brp_list[brp_1].control);
	if (retval != ERROR_OK)
		return retval;

	control_IVA = ((IVA_machmode & 0x7) << 20)
		| (brp_1 << 16)
		| (1 << 13)
		| (IVA_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_2].used = 1;
	brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
	brp_list[brp_2].control = control_IVA;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].value & 0xFFFFFFFF);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].value >> 32);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].control);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}

static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}

static int aarch64_add_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_breakpoint(target, breakpoint, 0x00);	/* Exact match */
}

static int aarch64_add_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_context_breakpoint(target, breakpoint, 0x02);	/* asid match */
}

static int aarch64_add_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_hybrid_breakpoint(target, breakpoint);	/* ??? */
}


static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

#if 0
/* It is perfectly possible to remove breakpoints while the target is running */
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}
#endif

	if (breakpoint->set) {
		aarch64_unset_breakpoint(target, breakpoint);
		if (breakpoint->type == BKPT_HARD)
			aarch64->brp_num_available++;
	}

	return ERROR_OK;
}

/*
 * AArch64 reset functions
 */

static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		jtag_add_reset(0, 1);
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target)) {
		register_cache_invalidate(armv8->arm.core_cache);
		register_cache_invalidate(armv8->arm.core_cache->next);
	}

	target->state = TARGET_RESET;

	return ERROR_OK;
}

static int aarch64_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	jtag_add_reset(0, 0);

	if (!target_was_examined(target))
		return ERROR_OK;

	retval = aarch64_poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
				target_name(target));
			retval = target_halt(target);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return aarch64_init_debug_access(target);
}

static int aarch64_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

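	/* The DCC reads and store opcodes below use W1/X1 as the data
	 * register, so mark it dirty up front */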
	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode if necessary */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t data, opcode;

		/* write the data to store into DTRRX */
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		if (arm->core_state == ARM_STATE_AARCH64)
			retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}

static int aarch64_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;

	armv8_reg_current(arm, 1)->dirty = true;

	/* Step 1.d - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 2.a - Do the write */
	retval = mem_ap_write_buf_noincr(armv8->debug_ap,
			buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.a - Switch DTR mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}

static int aarch64_write_cpu_memory(struct target *target,
	uint64_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* write memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */
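	/*
	 * Sketch of the sequence below: load the start address into
	 * X0/R0 through the DCC, then either stream whole 32-bit words
	 * in memory-access mode (fast path) or issue one store per
	 * element through the ITR (slow path).
	 */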
1814
1815 /* Read DSCR */
1816 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1817 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1818 if (retval != ERROR_OK)
1819 return retval;
1820
1821 /* Set Normal access mode */
1822 dscr = (dscr & ~DSCR_MA);
1823 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1824 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1825 if (retval != ERROR_OK)
1826 return retval;
1827
1828 if (arm->core_state == ARM_STATE_AARCH64) {
1829 /* Write X0 with value 'address' using write procedure */
1830 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1831 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1832 retval = dpm->instr_write_data_dcc_64(dpm,
1833 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
1834 } else {
1835 /* Write R0 with value 'address' using write procedure */
1836 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1837 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1838 retval = dpm->instr_write_data_dcc(dpm,
1839 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
1840 }
1841
1842 if (retval != ERROR_OK)
1843 return retval;
1844
1845 if (size == 4 && (address % 4) == 0)
1846 retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
1847 else
1848 retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);
1849
1850 if (retval != ERROR_OK) {
1851 /* Unset DTR mode */
1852 mem_ap_read_atomic_u32(armv8->debug_ap,
1853 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1854 dscr &= ~DSCR_MA;
1855 mem_ap_write_atomic_u32(armv8->debug_ap,
1856 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1857 }
1858
1859 /* Check for sticky abort flags in the DSCR */
1860 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1861 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1862 if (retval != ERROR_OK)
1863 return retval;
1864
1865 dpm->dscr = dscr;
1866 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1867 /* Abort occurred - clear it and exit */
1868 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1869 armv8_dpm_handle_exception(dpm, true);
1870 return ERROR_FAIL;
1871 }
1872
1873 /* Done */
1874 return ERROR_OK;
1875 }
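
/*
 * Callers never choose between the fast and slow paths themselves - the
 * dispatch above is internal - but the rule is worth making explicit. A
 * hedged sketch with a hypothetical helper name, not built:
 */
#if 0
static bool can_use_fast_dcc_path(target_addr_t address, uint32_t size)
{
	/* the fast path streams whole words through DTRRX, so it is only
	 * valid for 4-byte accesses at 4-byte-aligned addresses */
	return size == 4 && (address % 4) == 0;
}
#endif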
1876
1877 static int aarch64_read_cpu_memory_slow(struct target *target,
1878 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
1879 {
1880 struct armv8_common *armv8 = target_to_armv8(target);
1881 struct arm_dpm *dpm = &armv8->dpm;
1882 struct arm *arm = &armv8->arm;
1883 int retval;
1884
1885 armv8_reg_current(arm, 1)->dirty = true;
1886
1887 /* change DCC to normal mode (if necessary) */
1888 if (*dscr & DSCR_MA) {
1889 *dscr &= ~DSCR_MA;
1890 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1891 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1892 if (retval != ERROR_OK)
1893 return retval;
1894 }
1895
1896 while (count) {
1897 uint32_t opcode, data;
1898
1899 if (size == 1)
1900 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
1901 else if (size == 2)
1902 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
1903 else
1904 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
1905 retval = dpm->instr_execute(dpm, opcode);
1906 if (retval != ERROR_OK)
1907 return retval;
1908
1909 if (arm->core_state == ARM_STATE_AARCH64)
1910 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
1911 else
1912 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1913 if (retval != ERROR_OK)
1914 return retval;
1915
1916 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1917 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
1918 if (retval != ERROR_OK)
1919 return retval;
1920
1921 if (size == 1)
1922 *buffer = (uint8_t)data;
1923 else if (size == 2)
1924 target_buffer_set_u16(target, buffer, (uint16_t)data);
1925 else
1926 target_buffer_set_u32(target, buffer, data);
1927
1928 /* Advance */
1929 buffer += size;
1930 --count;
1931 }
1932
1933 return ERROR_OK;
1934 }
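
/*
 * Note on the buffer stores above: target_buffer_set_u16/_u32 serialize in
 * the target's byte order, so the host-side buffer ends up byte-for-byte
 * identical to target memory. A sketch of the 16-bit case for a
 * little-endian target (helper name hypothetical, not built):
 */
#if 0
static void set_u16_le_sketch(uint8_t *buffer, uint16_t value)
{
	buffer[0] = value & 0xff;		/* low byte first */
	buffer[1] = (value >> 8) & 0xff;	/* then the high byte */
}
#endif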
1935
1936 static int aarch64_read_cpu_memory_fast(struct target *target,
1937 uint32_t count, uint8_t *buffer, uint32_t *dscr)
1938 {
1939 struct armv8_common *armv8 = target_to_armv8(target);
1940 struct arm_dpm *dpm = &armv8->dpm;
1941 struct arm *arm = &armv8->arm;
1942 int retval;
1943 uint32_t value;
1944
1945 /* Mark X1 as dirty */
1946 armv8_reg_current(arm, 1)->dirty = true;
1947
1948 if (arm->core_state == ARM_STATE_AARCH64) {
1949 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1950 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
1951 } else {
1952 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1953 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1954 }
1955
1956 if (retval != ERROR_OK)
1957 return retval;
1958
1959 /* Step 1.e - Change DCC to memory mode */
1960 *dscr |= DSCR_MA;
1961 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1962 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1963 if (retval != ERROR_OK)
1964 return retval;
1965
1966 /* Step 1.f - read DBGDTRTX and discard the value */
1967 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1968 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1969 if (retval != ERROR_OK)
1970 return retval;
1971
1972 count--;
1973 /* Read the data - each read of the DTRTX register causes the load instruction to be reissued.
1974 * Abort flags are sticky, so they can be checked once at the end of the transaction.
1975 *
1976 * The data is transferred in 32-bit quantities, aligned to a 32-bit boundary.
1977 */
1978
1979 if (count) {
1980 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1981 * increments X0 by 4. */
1982 retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
1983 armv8->debug_base + CPUV8_DBG_DTRTX);
1984 if (retval != ERROR_OK)
1985 return retval;
1986 }
1987
1988 /* Step 3.a - set DTR access mode back to Normal mode */
1989 *dscr &= ~DSCR_MA;
1990 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1991 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1992 if (retval != ERROR_OK)
1993 return retval;
1994
1995 /* Step 3.b - read DBGDTRTX for the final value */
1996 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1997 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1998 if (retval != ERROR_OK)
1999 return retval;
2000
2001 target_buffer_set_u32(target, buffer + count * 4, value);
2002 return retval;
2003 }
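
/*
 * Why the last word is handled separately: while DSCR.MA is set, every
 * read of DBGDTRTX re-issues the load and advances X0, so reading the
 * final word in memory access mode would fetch one word past the
 * requested range. The function therefore drains only count-1 words in
 * MA mode and collects the last value from DTRTX after returning to
 * normal mode. The tail in isolation (variables as in the function,
 * count already decremented):
 */
#if 0
uint32_t last;
retval = mem_ap_read_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_DTRTX, &last);
target_buffer_set_u32(target, buffer + count * 4, last);
#endif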
2004
2005 static int aarch64_read_cpu_memory(struct target *target,
2006 target_addr_t address, uint32_t size,
2007 uint32_t count, uint8_t *buffer)
2008 {
2009 /* read memory through APB-AP */
2010 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2011 struct armv8_common *armv8 = target_to_armv8(target);
2012 struct arm_dpm *dpm = &armv8->dpm;
2013 struct arm *arm = &armv8->arm;
2014 uint32_t dscr;
2015
2016 LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
2017 address, size, count);
2018
2019 if (target->state != TARGET_HALTED) {
2020 LOG_WARNING("target not halted");
2021 return ERROR_TARGET_NOT_HALTED;
2022 }
2023
2024 /* Mark register X0 as dirty, as it will be used
2025 * for transferring the data.
2026 * It will be restored automatically when exiting
2027 * debug mode
2028 */
2029 armv8_reg_current(arm, 0)->dirty = true;
2030
2031 /* Read DSCR */
2032 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2033 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2034 if (retval != ERROR_OK)
2035 return retval;
2036
2037 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2038
2039 /* Set Normal access mode */
2040 dscr &= ~DSCR_MA;
2041 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2042 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2043 if (retval != ERROR_OK)
2044 return retval;
2045
2046 if (arm->core_state == ARM_STATE_AARCH64) {
2047 /* Write X0 with value 'address' using write procedure */
2048 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2049 /* Step 1.c - Copy value from DTR to X0 using instruction mrs x0, DBGDTR_EL0 */
2050 retval = dpm->instr_write_data_dcc_64(dpm,
2051 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
2052 } else {
2053 /* Write R0 with value 'address' using write procedure */
2054 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2055 /* Step 1.c - Copy value from DTR to R0 using instruction mrc p14, 0, r0, c0, c5, 0 (DBGDTRRXint) */
2056 retval = dpm->instr_write_data_dcc(dpm,
2057 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
2058 }
2059
2060 if (retval != ERROR_OK)
2061 return retval;
2062
2063 if (size == 4 && (address % 4) == 0)
2064 retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
2065 else
2066 retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2067
2068 if (dscr & DSCR_MA) {
2069 dscr &= ~DSCR_MA;
2070 mem_ap_write_atomic_u32(armv8->debug_ap,
2071 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2072 }
2073
2074 if (retval != ERROR_OK)
2075 return retval;
2076
2077 /* Check for sticky abort flags in the DSCR */
2078 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2079 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2080 if (retval != ERROR_OK)
2081 return retval;
2082
2083 dpm->dscr = dscr;
2084
2085 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2086 /* Abort occurred - clear it and exit */
2087 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2088 armv8_dpm_handle_exception(dpm, true);
2089 return ERROR_FAIL;
2090 }
2091
2092 /* Done */
2093 return ERROR_OK;
2094 }
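
/*
 * Hedged usage sketch: external code reaches this routine only through
 * the target_type hooks registered at the bottom of this file; a direct
 * call would look as follows (address and sizes illustrative only):
 */
#if 0
uint8_t buf[64];
int err = aarch64_read_cpu_memory(target, 0x80000000ull, 4,
	sizeof(buf) / 4, buf);	/* aligned words select the fast path */
if (err != ERROR_OK)
	LOG_ERROR("APB-AP memory read failed: %d", err);
#endif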
2095
2096 static int aarch64_read_phys_memory(struct target *target,
2097 target_addr_t address, uint32_t size,
2098 uint32_t count, uint8_t *buffer)
2099 {
2100 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2101
2102 if (count && buffer) {
2103 /* read memory through APB-AP */
2104 retval = aarch64_mmu_modify(target, 0);
2105 if (retval != ERROR_OK)
2106 return retval;
2107 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2108 }
2109 return retval;
2110 }
2111
2112 static int aarch64_read_memory(struct target *target, target_addr_t address,
2113 uint32_t size, uint32_t count, uint8_t *buffer)
2114 {
2115 int mmu_enabled = 0;
2116 int retval;
2117
2118 /* determine if MMU was enabled on target stop */
2119 retval = aarch64_mmu(target, &mmu_enabled);
2120 if (retval != ERROR_OK)
2121 return retval;
2122
2123 if (mmu_enabled) {
2124 /* enable MMU as we could have disabled it for phys access */
2125 retval = aarch64_mmu_modify(target, 1);
2126 if (retval != ERROR_OK)
2127 return retval;
2128 }
2129 return aarch64_read_cpu_memory(target, address, size, count, buffer);
2130 }
2131
2132 static int aarch64_write_phys_memory(struct target *target,
2133 target_addr_t address, uint32_t size,
2134 uint32_t count, const uint8_t *buffer)
2135 {
2136 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2137
2138 if (count && buffer) {
2139 /* write memory through APB-AP */
2140 retval = aarch64_mmu_modify(target, 0);
2141 if (retval != ERROR_OK)
2142 return retval;
2143 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2144 }
2145
2146 return retval;
2147 }
2148
2149 static int aarch64_write_memory(struct target *target, target_addr_t address,
2150 uint32_t size, uint32_t count, const uint8_t *buffer)
2151 {
2152 int mmu_enabled = 0;
2153 int retval;
2154
2155 /* determine if MMU was enabled on target stop */
2156 retval = aarch64_mmu(target, &mmu_enabled);
2157 if (retval != ERROR_OK)
2158 return retval;
2159
2160 if (mmu_enabled) {
2161 /* enable MMU as we could have disabled it for phys access */
2162 retval = aarch64_mmu_modify(target, 1);
2163 if (retval != ERROR_OK)
2164 return retval;
2165 }
2166 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2167 }
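
/*
 * The four wrappers above differ only in MMU handling: the *_phys_
 * variants force translation off via aarch64_mmu_modify(target, 0), the
 * virtual variants re-enable it if it was live when the core halted.
 * Illustrative call pattern (addresses and data hypothetical, not built):
 */
#if 0
/* virtual access - honours the halted core's translation regime */
retval = aarch64_write_memory(target, 0xffff000008000000ull, 4, 1, data);
/* physical access - the MMU is switched off around the transfer */
retval = aarch64_write_phys_memory(target, 0x40000000ull, 4, 1, data);
#endif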
2168
2169 static int aarch64_handle_target_request(void *priv)
2170 {
2171 struct target *target = priv;
2172 struct armv8_common *armv8 = target_to_armv8(target);
2173 int retval;
2174
2175 if (!target_was_examined(target))
2176 return ERROR_OK;
2177 if (!target->dbg_msg_enabled)
2178 return ERROR_OK;
2179
2180 if (target->state == TARGET_RUNNING) {
2181 uint32_t request;
2182 uint32_t dscr;
2183 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2184 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2185
2186 /* check if we have data */
2187 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2188 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2189 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2190 if (retval == ERROR_OK) {
2191 target_request(target, request);
2192 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2193 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2194 }
2195 }
2196 }
2197
2198 return ERROR_OK;
2199 }
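
/*
 * Firmware-side counterpart, sketched under stated assumptions (this is
 * not OpenOCD code and is not built): a program on the target raises a
 * request by pushing a word into the DCC transmit register, which the
 * poller above observes as DSCR_DTR_TX_FULL. The MDSCR_EL1.TXfull bit
 * position (29) and the EL1 accessors are assumptions taken from the
 * ARMv8 architecture, not from this file.
 */
#if 0
static inline void dcc_send_word(uint32_t value)
{
	uint64_t mdscr;

	do {	/* wait until the debugger has drained the previous word */
		__asm__ volatile("mrs %0, mdscr_el1" : "=r" (mdscr));
	} while (mdscr & (1ull << 29));	/* TXfull */

	__asm__ volatile("msr dbgdtrtx_el0, %0" : : "r" ((uint64_t)value));
}
#endif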
2200
2201 static int aarch64_examine_first(struct target *target)
2202 {
2203 struct aarch64_common *aarch64 = target_to_aarch64(target);
2204 struct armv8_common *armv8 = &aarch64->armv8_common;
2205 struct adiv5_dap *swjdp = armv8->arm.dap;
2206 struct aarch64_private_config *pc;
2207 int i;
2208 int retval = ERROR_OK;
2209 uint64_t debug, ttypr;
2210 uint32_t cpuid;
2211 uint32_t tmp0, tmp1, tmp2, tmp3;
2212 debug = ttypr = cpuid = 0;
2213
2214 /* Search for the APB-AP - it is needed for access to debug registers */
2215 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2216 if (retval != ERROR_OK) {
2217 LOG_ERROR("Could not find APB-AP for debug access");
2218 return retval;
2219 }
2220
2221 retval = mem_ap_init(armv8->debug_ap);
2222 if (retval != ERROR_OK) {
2223 LOG_ERROR("Could not initialize the APB-AP");
2224 return retval;
2225 }
2226
2227 armv8->debug_ap->memaccess_tck = 10;
2228
2229 if (!target->dbgbase_set) {
2230 uint32_t dbgbase;
2231 /* Get ROM Table base */
2232 uint32_t apid;
2233 int32_t coreidx = target->coreid;
2234 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2235 if (retval != ERROR_OK)
2236 return retval;
2237 /* Lookup 0x15 -- Processor DAP */
2238 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2239 &armv8->debug_base, &coreidx);
2240 if (retval != ERROR_OK)
2241 return retval;
2242 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
2243 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2244 } else
2245 armv8->debug_base = target->dbgbase;
2246
2247 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2248 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2249 if (retval != ERROR_OK) {
2250 LOG_DEBUG("Examine %s failed", "oslock");
2251 return retval;
2252 }
2253
2254 retval = mem_ap_read_u32(armv8->debug_ap,
2255 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2256 if (retval != ERROR_OK) {
2257 LOG_DEBUG("Examine %s failed", "CPUID");
2258 return retval;
2259 }
2260
2261 retval = mem_ap_read_u32(armv8->debug_ap,
2262 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2263 retval += mem_ap_read_u32(armv8->debug_ap,
2264 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2265 if (retval != ERROR_OK) {
2266 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2267 return retval;
2268 }
2269 retval = mem_ap_read_u32(armv8->debug_ap,
2270 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2271 retval += mem_ap_read_u32(armv8->debug_ap,
2272 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2273 if (retval != ERROR_OK) {
2274 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2275 return retval;
2276 }
2277
2278 retval = dap_run(armv8->debug_ap->dap);
2279 if (retval != ERROR_OK) {
2280 LOG_ERROR("%s: examination failed", target_name(target));
2281 return retval;
2282 }
2283
2284 ttypr |= tmp1;
2285 ttypr = (ttypr << 32) | tmp0;
2286 debug |= tmp3;
2287 debug = (debug << 32) | tmp2;
2288
2289 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2290 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2291 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2292
2293 if (target->private_config == NULL)
2294 return ERROR_FAIL;
2295
2296 pc = (struct aarch64_private_config *)target->private_config;
2297 if (pc->cti == NULL)
2298 return ERROR_FAIL;
2299
2300 armv8->cti = pc->cti;
2301
2302 retval = aarch64_dpm_setup(aarch64, debug);
2303 if (retval != ERROR_OK)
2304 return retval;
2305
2306 /* Setup Breakpoint Register Pairs */
2307 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2308 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2309 aarch64->brp_num_available = aarch64->brp_num;
2310 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2311 for (i = 0; i < aarch64->brp_num; i++) {
2312 aarch64->brp_list[i].used = 0;
2313 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2314 aarch64->brp_list[i].type = BRP_NORMAL;
2315 else
2316 aarch64->brp_list[i].type = BRP_CONTEXT;
2317 aarch64->brp_list[i].value = 0;
2318 aarch64->brp_list[i].control = 0;
2319 aarch64->brp_list[i].BRPn = i;
2320 }
2321
2322 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2323
2324 target->state = TARGET_UNKNOWN;
2325 target->debug_reason = DBG_REASON_NOTHALTED;
2326 aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2327 target_set_examined(target);
2328 return ERROR_OK;
2329 }
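
/*
 * The breakpoint counts above decode ID_AA64DFR0_EL1, where each field
 * stores "count minus one": BRPs in bits [15:12], CTX_CMPs in bits
 * [31:28] (WRPs, bits [23:20], are not consumed here). The same decode
 * as self-documenting helpers (hypothetical names, not built):
 */
#if 0
static inline unsigned int dfr0_brps(uint64_t dfr0)
{
	return ((dfr0 >> 12) & 0xf) + 1;	/* total breakpoint pairs */
}

static inline unsigned int dfr0_ctx_cmps(uint64_t dfr0)
{
	return ((dfr0 >> 28) & 0xf) + 1;	/* context-matching subset */
}
#endif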
2330
2331 static int aarch64_examine(struct target *target)
2332 {
2333 int retval = ERROR_OK;
2334
2335 /* don't re-probe hardware after each reset */
2336 if (!target_was_examined(target))
2337 retval = aarch64_examine_first(target);
2338
2339 /* Configure core debug access */
2340 if (retval == ERROR_OK)
2341 retval = aarch64_init_debug_access(target);
2342
2343 return retval;
2344 }
2345
2346 /*
2347 * AArch64 target creation and initialization
2348 */
2349
2350 static int aarch64_init_target(struct command_context *cmd_ctx,
2351 struct target *target)
2352 {
2353 /* examine_first() does a bunch of this */
2354 return ERROR_OK;
2355 }
2356
2357 static int aarch64_init_arch_info(struct target *target,
2358 struct aarch64_common *aarch64, struct adiv5_dap *dap)
2359 {
2360 struct armv8_common *armv8 = &aarch64->armv8_common;
2361
2362 /* Setup struct aarch64_common */
2363 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2364 armv8->arm.dap = dap;
2365
2366 /* register arch-specific functions */
2367 armv8->examine_debug_reason = NULL;
2368 armv8->post_debug_entry = aarch64_post_debug_entry;
2369 armv8->pre_restore_context = NULL;
2370 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2371
2372 armv8_init_arch_info(target, armv8);
2373 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2374
2375 return ERROR_OK;
2376 }
2377
2378 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2379 {
2380 struct aarch64_private_config *pc = target->private_config;
2381 struct aarch64_common *aarch64;
2382 
2383 /* verify the configuration before allocating, so a failure cannot leak memory */
2384 if (!pc || adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2385 return ERROR_FAIL;
2385 
2386 aarch64 = calloc(1, sizeof(struct aarch64_common));
2386 return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2387 }
2388
2389 static void aarch64_deinit_target(struct target *target)
2390 {
2391 struct aarch64_common *aarch64 = target_to_aarch64(target);
2392 struct armv8_common *armv8 = &aarch64->armv8_common;
2393 struct arm_dpm *dpm = &armv8->dpm;
2394
2395 armv8_free_reg_cache(target);
2396 free(aarch64->brp_list);
2397 free(dpm->dbp);
2398 free(dpm->dwp);
2399 free(target->private_config);
2400 free(aarch64);
2401 }
2402
2403 static int aarch64_mmu(struct target *target, int *enabled)
2404 {
2405 if (target->state != TARGET_HALTED) {
2406 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2407 return ERROR_TARGET_INVALID;
2408 }
2409
2410 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2411 return ERROR_OK;
2412 }
2413
2414 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2415 target_addr_t *phys)
2416 {
2417 return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2418 }
2419
2420 /*
2421 * private target configuration items
2422 */
2423 enum aarch64_cfg_param {
2424 CFG_CTI,
2425 };
2426
2427 static const Jim_Nvp nvp_config_opts[] = {
2428 { .name = "-cti", .value = CFG_CTI },
2429 { .name = NULL, .value = -1 }
2430 };
2431
2432 static int aarch64_jim_configure(struct target *target, Jim_GetOptInfo *goi)
2433 {
2434 struct aarch64_private_config *pc;
2435 Jim_Nvp *n;
2436 int e;
2437
2438 pc = (struct aarch64_private_config *)target->private_config;
2439 if (pc == NULL) {
2440 pc = calloc(1, sizeof(struct aarch64_private_config));
2441 target->private_config = pc;
2442 }
2443
2444 /*
2445 * Call adiv5_jim_configure() to parse the common DAP options
2446 * It will return JIM_CONTINUE if it didn't find any known
2447 * options, JIM_OK if it correctly parsed the topmost option
2448 * and JIM_ERR if an error occurred during parameter evaluation.
2449 * For JIM_CONTINUE, we check our own params.
2450 */
2451 e = adiv5_jim_configure(target, goi);
2452 if (e != JIM_CONTINUE)
2453 return e;
2454
2455 /* parse config or cget options ... */
2456 if (goi->argc > 0) {
2457 Jim_SetEmptyResult(goi->interp);
2458
2459 /* check first if topmost item is for us */
2460 e = Jim_Nvp_name2value_obj(goi->interp, nvp_config_opts,
2461 goi->argv[0], &n);
2462 if (e != JIM_OK)
2463 return JIM_CONTINUE;
2464
2465 e = Jim_GetOpt_Obj(goi, NULL);
2466 if (e != JIM_OK)
2467 return e;
2468
2469 switch (n->value) {
2470 case CFG_CTI: {
2471 if (goi->isconfigure) {
2472 Jim_Obj *o_cti;
2473 struct arm_cti *cti;
2474 e = Jim_GetOpt_Obj(goi, &o_cti);
2475 if (e != JIM_OK)
2476 return e;
2477 cti = cti_instance_by_jim_obj(goi->interp, o_cti);
2478 if (cti == NULL) {
2479 Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
2480 return JIM_ERR;
2481 }
2482 pc->cti = cti;
2483 } else {
2484 if (goi->argc != 0) {
2485 Jim_WrongNumArgs(goi->interp,
2486 goi->argc, goi->argv,
2487 "NO PARAMS");
2488 return JIM_ERR;
2489 }
2490
2491 if (pc == NULL || pc->cti == NULL) {
2492 Jim_SetResultString(goi->interp, "CTI not configured", -1);
2493 return JIM_ERR;
2494 }
2495 Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
2496 }
2497 break;
2498 }
2499
2500 default:
2501 return JIM_CONTINUE;
2502 }
2503 }
2504
2505 return JIM_OK;
2506 }
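
/*
 * Typical board configuration (Tcl) exercising the -cti option parsed
 * above; every name and the CTI base address are illustrative only:
 *
 *   cti create $_CHIPNAME.cti0 -dap $_CHIPNAME.dap -ap-num 1 \
 *           -ctibase 0x80220000
 *   target create $_CHIPNAME.a57.0 aarch64 -dap $_CHIPNAME.dap \
 *           -cti $_CHIPNAME.cti0
 *
 * The cget direction of the same option reads the name back:
 *
 *   $_CHIPNAME.a57.0 cget -cti
 */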
2507
2508 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2509 {
2510 struct target *target = get_current_target(CMD_CTX);
2511 struct armv8_common *armv8 = target_to_armv8(target);
2512
2513 return armv8_handle_cache_info_command(CMD_CTX,
2514 &armv8->armv8_mmu.armv8_cache);
2515 }
2516 
2518 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2519 {
2520 struct target *target = get_current_target(CMD_CTX);
2521 if (!target_was_examined(target)) {
2522 LOG_ERROR("target not examined yet");
2523 return ERROR_FAIL;
2524 }
2525
2526 return aarch64_init_debug_access(target);
2527 }
2528 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2529 {
2530 struct target *target = get_current_target(CMD_CTX);
2531 /* check target is an smp target */
2532 struct target_list *head;
2533 struct target *curr;
2534 head = target->head;
2535 target->smp = 0;
2536 if (head != NULL) {
2537 while (head != NULL) {
2538 curr = head->target;
2539 curr->smp = 0;
2540 head = head->next;
2541 }
2542 /* fixes the target display to the debugger */
2543 target->gdb_service->target = target;
2544 }
2545 return ERROR_OK;
2546 }
2547
2548 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2549 {
2550 struct target *target = get_current_target(CMD_CTX);
2551 struct target_list *head;
2552 struct target *curr;
2553 head = target->head;
2554 if (head != NULL) {
2555 target->smp = 1;
2556 while (head != NULL) {
2557 curr = head->target;
2558 curr->smp = 1;
2559 head = head->next;
2560 }
2561 }
2562 return ERROR_OK;
2563 }
2564
2565 COMMAND_HANDLER(aarch64_mask_interrupts_command)
2566 {
2567 struct target *target = get_current_target(CMD_CTX);
2568 struct aarch64_common *aarch64 = target_to_aarch64(target);
2569
2570 static const Jim_Nvp nvp_maskisr_modes[] = {
2571 { .name = "off", .value = AARCH64_ISRMASK_OFF },
2572 { .name = "on", .value = AARCH64_ISRMASK_ON },
2573 { .name = NULL, .value = -1 },
2574 };
2575 const Jim_Nvp *n;
2576
2577 if (CMD_ARGC > 0) {
2578 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2579 if (n->name == NULL) {
2580 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2581 return ERROR_COMMAND_SYNTAX_ERROR;
2582 }
2583
2584 aarch64->isrmasking_mode = n->value;
2585 }
2586
2587 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
2588 command_print(CMD_CTX, "aarch64 interrupt mask %s", n->name);
2589
2590 return ERROR_OK;
2591 }
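
/*
 * Example session (telnet/Tcl) driving the handlers above once they are
 * registered below; output elided, names illustrative:
 *
 *   > aarch64 smp_off      ;# halt/resume cores individually
 *   > aarch64 smp_on       ;# restore gang run control
 *   > aarch64 maskisr on   ;# mask interrupts while single-stepping
 *   > aarch64 cache_info   ;# dump the cache topology
 */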
2592
2593 static const struct command_registration aarch64_exec_command_handlers[] = {
2594 {
2595 .name = "cache_info",
2596 .handler = aarch64_handle_cache_info_command,
2597 .mode = COMMAND_EXEC,
2598 .help = "display information about target caches",
2599 .usage = "",
2600 },
2601 {
2602 .name = "dbginit",
2603 .handler = aarch64_handle_dbginit_command,
2604 .mode = COMMAND_EXEC,
2605 .help = "Initialize core debug",
2606 .usage = "",
2607 },
2608 { .name = "smp_off",
2609 .handler = aarch64_handle_smp_off_command,
2610 .mode = COMMAND_EXEC,
2611 .help = "Stop smp handling",
2612 .usage = "",
2613 },
2614 {
2615 .name = "smp_on",
2616 .handler = aarch64_handle_smp_on_command,
2617 .mode = COMMAND_EXEC,
2618 .help = "Restart smp handling",
2619 .usage = "",
2620 },
2621 {
2622 .name = "maskisr",
2623 .handler = aarch64_mask_interrupts_command,
2624 .mode = COMMAND_ANY,
2625 .help = "mask aarch64 interrupts during single-step",
2626 .usage = "['on'|'off']",
2627 },
2628
2629 COMMAND_REGISTRATION_DONE
2630 };
2631 static const struct command_registration aarch64_command_handlers[] = {
2632 {
2633 .chain = armv8_command_handlers,
2634 },
2635 {
2636 .name = "aarch64",
2637 .mode = COMMAND_ANY,
2638 .help = "Aarch64 command group",
2639 .usage = "",
2640 .chain = aarch64_exec_command_handlers,
2641 },
2642 COMMAND_REGISTRATION_DONE
2643 };
2644
2645 struct target_type aarch64_target = {
2646 .name = "aarch64",
2647
2648 .poll = aarch64_poll,
2649 .arch_state = armv8_arch_state,
2650
2651 .halt = aarch64_halt,
2652 .resume = aarch64_resume,
2653 .step = aarch64_step,
2654
2655 .assert_reset = aarch64_assert_reset,
2656 .deassert_reset = aarch64_deassert_reset,
2657
2658 /* REVISIT allow exporting VFP3 registers ... */
2659 .get_gdb_reg_list = armv8_get_gdb_reg_list,
2660
2661 .read_memory = aarch64_read_memory,
2662 .write_memory = aarch64_write_memory,
2663
2664 .add_breakpoint = aarch64_add_breakpoint,
2665 .add_context_breakpoint = aarch64_add_context_breakpoint,
2666 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2667 .remove_breakpoint = aarch64_remove_breakpoint,
2668 .add_watchpoint = NULL,
2669 .remove_watchpoint = NULL,
2670
2671 .commands = aarch64_command_handlers,
2672 .target_create = aarch64_target_create,
2673 .target_jim_configure = aarch64_jim_configure,
2674 .init_target = aarch64_init_target,
2675 .deinit_target = aarch64_deinit_target,
2676 .examine = aarch64_examine,
2677
2678 .read_phys_memory = aarch64_read_phys_memory,
2679 .write_phys_memory = aarch64_write_phys_memory,
2680 .mmu = aarch64_mmu,
2681 .virt2phys = aarch64_virt2phys,
2682 };