target/aarch64: remove dependency from jtag queue
[openocd.git] / src / target / aarch64.c
/***************************************************************************
 *   Copyright (C) 2015 by David Ung                                       *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *                                                                         *
 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include "arm_semihosting.h"
32 #include "jtag/interface.h"
33 #include "smp.h"
34 #include <helper/time_support.h>
35
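/*
 * LAZY means "trigger the CTI event and return immediately"; SYNC means
 * "trigger the event, then poll EDPRSR until the PE has actually halted
 * or restarted".
 */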
36 enum restart_mode {
37 RESTART_LAZY,
38 RESTART_SYNC,
39 };
40
41 enum halt_mode {
42 HALT_LAZY,
43 HALT_SYNC,
44 };
45
46 struct aarch64_private_config {
47 struct adiv5_private_config adiv5_config;
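	/* per-PE Cross Trigger Interface, used for halt and restart events */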
48 struct arm_cti *cti;
49 };
50
51 static int aarch64_poll(struct target *target);
52 static int aarch64_debug_entry(struct target *target);
53 static int aarch64_restore_context(struct target *target, bool bpwp);
54 static int aarch64_set_breakpoint(struct target *target,
55 struct breakpoint *breakpoint, uint8_t matchmode);
56 static int aarch64_set_context_breakpoint(struct target *target,
57 struct breakpoint *breakpoint, uint8_t matchmode);
58 static int aarch64_set_hybrid_breakpoint(struct target *target,
59 struct breakpoint *breakpoint);
60 static int aarch64_unset_breakpoint(struct target *target,
61 struct breakpoint *breakpoint);
62 static int aarch64_mmu(struct target *target, int *enabled);
63 static int aarch64_virt2phys(struct target *target,
64 target_addr_t virt, target_addr_t *phys);
65 static int aarch64_read_cpu_memory(struct target *target,
66 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
67
68 static int aarch64_restore_system_control_reg(struct target *target)
69 {
70 enum arm_mode target_mode = ARM_MODE_ANY;
71 int retval = ERROR_OK;
72 uint32_t instr;
73
74 struct aarch64_common *aarch64 = target_to_aarch64(target);
75 struct armv8_common *armv8 = target_to_armv8(target);
76
77 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
78 aarch64->system_control_reg_curr = aarch64->system_control_reg;
79 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
80
81 switch (armv8->arm.core_mode) {
82 case ARMV8_64_EL0T:
83 target_mode = ARMV8_64_EL1H;
84 /* fall through */
85 case ARMV8_64_EL1T:
86 case ARMV8_64_EL1H:
87 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
88 break;
89 case ARMV8_64_EL2T:
90 case ARMV8_64_EL2H:
91 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
92 break;
93 case ARMV8_64_EL3H:
94 case ARMV8_64_EL3T:
95 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
96 break;
97
98 case ARM_MODE_SVC:
99 case ARM_MODE_ABT:
100 case ARM_MODE_FIQ:
101 case ARM_MODE_IRQ:
102 case ARM_MODE_SYS:
103 instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
104 break;
105
106 default:
107 LOG_INFO("cannot read system control register in this mode");
108 return ERROR_FAIL;
109 }
110
111 if (target_mode != ARM_MODE_ANY)
112 armv8_dpm_modeswitch(&armv8->dpm, target_mode);
113
114 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
115 if (retval != ERROR_OK)
116 return retval;
117
118 if (target_mode != ARM_MODE_ANY)
119 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
120 }
121
122 return retval;
123 }
124
/* Modify system_control_reg to enable or disable the MMU for:
 * - virt2phys address conversion
 * - reading or writing memory at physical or virtual addresses */
128 static int aarch64_mmu_modify(struct target *target, int enable)
129 {
130 struct aarch64_common *aarch64 = target_to_aarch64(target);
131 struct armv8_common *armv8 = &aarch64->armv8_common;
132 int retval = ERROR_OK;
133 uint32_t instr = 0;
134
135 if (enable) {
		/* the MMU can only be enabled here if it was enabled when the target stopped */
		if (!(aarch64->system_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable the MMU on a target that was stopped with the MMU disabled");
139 return ERROR_FAIL;
140 }
141 if (!(aarch64->system_control_reg_curr & 0x1U))
142 aarch64->system_control_reg_curr |= 0x1U;
143 } else {
144 if (aarch64->system_control_reg_curr & 0x4U) {
145 /* data cache is active */
146 aarch64->system_control_reg_curr &= ~0x4U;
147 /* flush data cache armv8 function to be called */
148 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
149 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
150 }
151 if ((aarch64->system_control_reg_curr & 0x1U)) {
152 aarch64->system_control_reg_curr &= ~0x1U;
153 }
154 }
155
156 switch (armv8->arm.core_mode) {
157 case ARMV8_64_EL0T:
158 case ARMV8_64_EL1T:
159 case ARMV8_64_EL1H:
160 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
161 break;
162 case ARMV8_64_EL2T:
163 case ARMV8_64_EL2H:
164 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
165 break;
166 case ARMV8_64_EL3H:
167 case ARMV8_64_EL3T:
168 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
169 break;
170
171 case ARM_MODE_SVC:
172 case ARM_MODE_ABT:
173 case ARM_MODE_FIQ:
174 case ARM_MODE_IRQ:
175 case ARM_MODE_SYS:
176 instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
177 break;
178
179 default:
180 LOG_DEBUG("unknown cpu state 0x%" PRIx32, armv8->arm.core_mode);
181 break;
182 }
183
184 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
185 aarch64->system_control_reg_curr);
186 return retval;
187 }
188
/*
 * Basic debug access; very low level, assumes the target state is saved
 */
192 static int aarch64_init_debug_access(struct target *target)
193 {
194 struct armv8_common *armv8 = target_to_armv8(target);
195 int retval;
196 uint32_t dummy;
197
198 LOG_DEBUG("%s", target_name(target));
199
200 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
201 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
202 if (retval != ERROR_OK) {
203 LOG_DEBUG("Examine %s failed", "oslock");
204 return retval;
205 }
206
207 /* Clear Sticky Power Down status Bit in PRSR to enable access to
208 the registers in the Core Power Domain */
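	/* (the sticky status bits clear on a read of EDPRSR) */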
209 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
210 armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
211 if (retval != ERROR_OK)
212 return retval;
213
214 /*
215 * Static CTI configuration:
216 * Channel 0 -> trigger outputs HALT request to PE
217 * Channel 1 -> trigger outputs Resume request to PE
218 * Gate all channel trigger events from entering the CTM
219 */
220
221 /* Enable CTI */
222 retval = arm_cti_enable(armv8->cti, true);
223 /* By default, gate all channel events to and from the CTM */
224 if (retval == ERROR_OK)
225 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
226 /* output halt requests to PE on channel 0 event */
227 if (retval == ERROR_OK)
228 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
229 /* output restart requests to PE on channel 1 event */
230 if (retval == ERROR_OK)
231 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
232 if (retval != ERROR_OK)
233 return retval;
234
235 /* Resync breakpoint registers */
236
237 return ERROR_OK;
238 }
239
240 /* Write to memory mapped registers directly with no cache or mmu handling */
241 static int aarch64_dap_write_memap_register_u32(struct target *target,
242 uint32_t address,
243 uint32_t value)
244 {
245 int retval;
246 struct armv8_common *armv8 = target_to_armv8(target);
247
248 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
249
250 return retval;
251 }
252
253 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
254 {
255 struct arm_dpm *dpm = &a8->armv8_common.dpm;
256 int retval;
257
258 dpm->arm = &a8->armv8_common.arm;
259 dpm->didr = debug;
260
261 retval = armv8_dpm_setup(dpm);
262 if (retval == ERROR_OK)
263 retval = armv8_dpm_initialize(dpm);
264
265 return retval;
266 }
267
268 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
269 {
270 struct armv8_common *armv8 = target_to_armv8(target);
271 return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
272 }
273
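/*
 * Read EDPRSR once and report whether (PRSR & mask) == (val & mask) in
 * *p_result; the raw PRSR value is optionally returned through *p_prsr.
 * Typical use, as seen in the callers below:
 *
 *	int halted;
 *	retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, NULL);
 */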
274 static int aarch64_check_state_one(struct target *target,
275 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
276 {
277 struct armv8_common *armv8 = target_to_armv8(target);
278 uint32_t prsr;
279 int retval;
280
281 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
282 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
283 if (retval != ERROR_OK)
284 return retval;
285
286 if (p_prsr)
287 *p_prsr = prsr;
288
289 if (p_result)
290 *p_result = (prsr & mask) == (val & mask);
291
292 return ERROR_OK;
293 }
294
295 static int aarch64_wait_halt_one(struct target *target)
296 {
297 int retval = ERROR_OK;
298 uint32_t prsr;
299
300 int64_t then = timeval_ms();
301 for (;;) {
302 int halted;
303
304 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
305 if (retval != ERROR_OK || halted)
306 break;
307
308 if (timeval_ms() > then + 1000) {
309 retval = ERROR_TARGET_TIMEOUT;
310 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
311 break;
312 }
313 }
314 return retval;
315 }
316
317 static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
318 {
319 int retval = ERROR_OK;
320 struct target_list *head = target->head;
321 struct target *first = NULL;
322
323 LOG_DEBUG("target %s exc %i", target_name(target), exc_target);
324
325 while (head != NULL) {
326 struct target *curr = head->target;
327 struct armv8_common *armv8 = target_to_armv8(curr);
328 head = head->next;
329
330 if (exc_target && curr == target)
331 continue;
332 if (!target_was_examined(curr))
333 continue;
334 if (curr->state != TARGET_RUNNING)
335 continue;
336
337 /* HACK: mark this target as prepared for halting */
338 curr->debug_reason = DBG_REASON_DBGRQ;
339
340 /* open the gate for channel 0 to let HALT requests pass to the CTM */
341 retval = arm_cti_ungate_channel(armv8->cti, 0);
342 if (retval == ERROR_OK)
343 retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
344 if (retval != ERROR_OK)
345 break;
346
347 LOG_DEBUG("target %s prepared", target_name(curr));
348
349 if (first == NULL)
350 first = curr;
351 }
352
353 if (p_first) {
354 if (exc_target && first)
355 *p_first = first;
356 else
357 *p_first = target;
358 }
359
360 return retval;
361 }
362
363 static int aarch64_halt_one(struct target *target, enum halt_mode mode)
364 {
365 int retval = ERROR_OK;
366 struct armv8_common *armv8 = target_to_armv8(target);
367
368 LOG_DEBUG("%s", target_name(target));
369
370 /* allow Halting Debug Mode */
371 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
372 if (retval != ERROR_OK)
373 return retval;
374
375 /* trigger an event on channel 0, this outputs a halt request to the PE */
376 retval = arm_cti_pulse_channel(armv8->cti, 0);
377 if (retval != ERROR_OK)
378 return retval;
379
380 if (mode == HALT_SYNC) {
381 retval = aarch64_wait_halt_one(target);
382 if (retval != ERROR_OK) {
383 if (retval == ERROR_TARGET_TIMEOUT)
384 LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
385 return retval;
386 }
387 }
388
389 return ERROR_OK;
390 }
391
392 static int aarch64_halt_smp(struct target *target, bool exc_target)
393 {
394 struct target *next = target;
395 int retval;
396
397 /* prepare halt on all PEs of the group */
398 retval = aarch64_prepare_halt_smp(target, exc_target, &next);
399
400 if (exc_target && next == target)
401 return retval;
402
403 /* halt the target PE */
404 if (retval == ERROR_OK)
405 retval = aarch64_halt_one(next, HALT_LAZY);
406
407 if (retval != ERROR_OK)
408 return retval;
409
410 /* wait for all PEs to halt */
411 int64_t then = timeval_ms();
412 for (;;) {
413 bool all_halted = true;
414 struct target_list *head;
415 struct target *curr;
416
417 foreach_smp_target(head, target->head) {
418 int halted;
419
420 curr = head->target;
421
422 if (!target_was_examined(curr))
423 continue;
424
425 retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
426 if (retval != ERROR_OK || !halted) {
427 all_halted = false;
428 break;
429 }
430 }
431
432 if (all_halted)
433 break;
434
435 if (timeval_ms() > then + 1000) {
436 retval = ERROR_TARGET_TIMEOUT;
437 break;
438 }
439
440 /*
441 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
442 * and it looks like the CTI's are not connected by a common
443 * trigger matrix. It seems that we need to halt one core in each
444 * cluster explicitly. So if we find that a core has not halted
445 * yet, we trigger an explicit halt for the second cluster.
446 */
447 retval = aarch64_halt_one(curr, HALT_LAZY);
448 if (retval != ERROR_OK)
449 break;
450 }
451
452 return retval;
453 }
454
455 static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
456 {
457 struct target *gdb_target = NULL;
458 struct target_list *head;
459 struct target *curr;
460
461 if (debug_reason == DBG_REASON_NOTHALTED) {
462 LOG_DEBUG("Halting remaining targets in SMP group");
463 aarch64_halt_smp(target, true);
464 }
465
466 /* poll all targets in the group, but skip the target that serves GDB */
467 foreach_smp_target(head, target->head) {
468 curr = head->target;
469 /* skip calling context */
470 if (curr == target)
471 continue;
472 if (!target_was_examined(curr))
473 continue;
474 /* skip targets that were already halted */
475 if (curr->state == TARGET_HALTED)
476 continue;
477 /* remember the gdb_service->target */
478 if (curr->gdb_service != NULL)
479 gdb_target = curr->gdb_service->target;
480 /* skip it */
481 if (curr == gdb_target)
482 continue;
483
484 /* avoid recursion in aarch64_poll() */
485 curr->smp = 0;
486 aarch64_poll(curr);
487 curr->smp = 1;
488 }
489
490 /* after all targets were updated, poll the gdb serving target */
491 if (gdb_target != NULL && gdb_target != target)
492 aarch64_poll(gdb_target);
493
494 return ERROR_OK;
495 }
496
497 /*
498 * Aarch64 Run control
499 */
500
501 static int aarch64_poll(struct target *target)
502 {
503 enum target_state prev_target_state;
504 int retval = ERROR_OK;
505 int halted;
506
507 retval = aarch64_check_state_one(target,
508 PRSR_HALT, PRSR_HALT, &halted, NULL);
509 if (retval != ERROR_OK)
510 return retval;
511
512 if (halted) {
513 prev_target_state = target->state;
514 if (prev_target_state != TARGET_HALTED) {
515 enum target_debug_reason debug_reason = target->debug_reason;
516
517 /* We have a halting debug event */
518 target->state = TARGET_HALTED;
519 LOG_DEBUG("Target %s halted", target_name(target));
520 retval = aarch64_debug_entry(target);
521 if (retval != ERROR_OK)
522 return retval;
523
524 if (target->smp)
525 update_halt_gdb(target, debug_reason);
526
527 if (arm_semihosting(target, &retval) != 0)
528 return retval;
529
530 switch (prev_target_state) {
531 case TARGET_RUNNING:
532 case TARGET_UNKNOWN:
533 case TARGET_RESET:
534 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
535 break;
536 case TARGET_DEBUG_RUNNING:
537 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
538 break;
539 default:
540 break;
541 }
542 }
543 } else
544 target->state = TARGET_RUNNING;
545
546 return retval;
547 }
548
549 static int aarch64_halt(struct target *target)
550 {
551 struct armv8_common *armv8 = target_to_armv8(target);
552 armv8->last_run_control_op = ARMV8_RUNCONTROL_HALT;
553
554 if (target->smp)
555 return aarch64_halt_smp(target, false);
556
557 return aarch64_halt_one(target, HALT_SYNC);
558 }
559
560 static int aarch64_restore_one(struct target *target, int current,
561 uint64_t *address, int handle_breakpoints, int debug_execution)
562 {
563 struct armv8_common *armv8 = target_to_armv8(target);
564 struct arm *arm = &armv8->arm;
565 int retval;
566 uint64_t resume_pc;
567
568 LOG_DEBUG("%s", target_name(target));
569
570 if (!debug_execution)
571 target_free_all_working_areas(target);
572
573 /* current = 1: continue on current pc, otherwise continue at <address> */
574 resume_pc = buf_get_u64(arm->pc->value, 0, 64);
575 if (!current)
576 resume_pc = *address;
577 else
578 *address = resume_pc;
579
	/* Make sure that the ARMv7 GDB Thumb fixups do not
	 * kill the return address
	 */
583 switch (arm->core_state) {
584 case ARM_STATE_ARM:
585 resume_pc &= 0xFFFFFFFC;
586 break;
587 case ARM_STATE_AARCH64:
588 resume_pc &= 0xFFFFFFFFFFFFFFFC;
589 break;
590 case ARM_STATE_THUMB:
591 case ARM_STATE_THUMB_EE:
592 /* When the return address is loaded into PC
593 * bit 0 must be 1 to stay in Thumb state
594 */
595 resume_pc |= 0x1;
596 break;
597 case ARM_STATE_JAZELLE:
598 LOG_ERROR("How do I resume into Jazelle state??");
599 return ERROR_FAIL;
600 }
601 LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
602 buf_set_u64(arm->pc->value, 0, 64, resume_pc);
603 arm->pc->dirty = true;
604 arm->pc->valid = true;
605
	/* call this now, before restoring the context, because it uses CPU
	 * register r0 to restore the system control register */
608 retval = aarch64_restore_system_control_reg(target);
609 if (retval == ERROR_OK)
610 retval = aarch64_restore_context(target, handle_breakpoints);
611
612 return retval;
613 }
614
/**
 * prepare a single target for restart
 */
620 static int aarch64_prepare_restart_one(struct target *target)
621 {
622 struct armv8_common *armv8 = target_to_armv8(target);
623 int retval;
624 uint32_t dscr;
625 uint32_t tmp;
626
627 LOG_DEBUG("%s", target_name(target));
628
629 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
630 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
631 if (retval != ERROR_OK)
632 return retval;
633
634 if ((dscr & DSCR_ITE) == 0)
635 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
636 if ((dscr & DSCR_ERR) != 0)
637 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
638
639 /* acknowledge a pending CTI halt event */
640 retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
641 /*
642 * open the CTI gate for channel 1 so that the restart events
643 * get passed along to all PEs. Also close gate for channel 0
644 * to isolate the PE from halt events.
645 */
646 if (retval == ERROR_OK)
647 retval = arm_cti_ungate_channel(armv8->cti, 1);
648 if (retval == ERROR_OK)
649 retval = arm_cti_gate_channel(armv8->cti, 0);
650
651 /* make sure that DSCR.HDE is set */
652 if (retval == ERROR_OK) {
653 dscr |= DSCR_HDE;
654 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
655 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
656 }
657
658 if (retval == ERROR_OK) {
659 /* clear sticky bits in PRSR, SDR is now 0 */
660 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
661 armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
662 }
663
664 return retval;
665 }
666
667 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
668 {
669 struct armv8_common *armv8 = target_to_armv8(target);
670 int retval;
671
672 LOG_DEBUG("%s", target_name(target));
673
674 /* trigger an event on channel 1, generates a restart request to the PE */
675 retval = arm_cti_pulse_channel(armv8->cti, 1);
676 if (retval != ERROR_OK)
677 return retval;
678
679 if (mode == RESTART_SYNC) {
680 int64_t then = timeval_ms();
681 for (;;) {
682 int resumed;
683 /*
684 * if PRSR.SDR is set now, the target did restart, even
685 * if it's now already halted again (e.g. due to breakpoint)
686 */
687 retval = aarch64_check_state_one(target,
688 PRSR_SDR, PRSR_SDR, &resumed, NULL);
689 if (retval != ERROR_OK || resumed)
690 break;
691
692 if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: Timeout waiting for resume", target_name(target));
694 retval = ERROR_TARGET_TIMEOUT;
695 break;
696 }
697 }
698 }
699
700 if (retval != ERROR_OK)
701 return retval;
702
703 target->debug_reason = DBG_REASON_NOTHALTED;
704 target->state = TARGET_RUNNING;
705
706 return ERROR_OK;
707 }
708
709 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
710 {
711 int retval;
712
713 LOG_DEBUG("%s", target_name(target));
714
715 retval = aarch64_prepare_restart_one(target);
716 if (retval == ERROR_OK)
717 retval = aarch64_do_restart_one(target, mode);
718
719 return retval;
720 }
721
722 /*
723 * prepare all but the current target for restart
724 */
725 static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
726 {
727 int retval = ERROR_OK;
728 struct target_list *head;
729 struct target *first = NULL;
730 uint64_t address;
731
732 foreach_smp_target(head, target->head) {
733 struct target *curr = head->target;
734
735 /* skip calling target */
736 if (curr == target)
737 continue;
738 if (!target_was_examined(curr))
739 continue;
740 if (curr->state != TARGET_HALTED)
741 continue;
742
743 /* resume at current address, not in step mode */
744 retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
745 if (retval == ERROR_OK)
746 retval = aarch64_prepare_restart_one(curr);
747 if (retval != ERROR_OK) {
748 LOG_ERROR("failed to restore target %s", target_name(curr));
749 break;
750 }
751 /* remember the first valid target in the group */
752 if (first == NULL)
753 first = curr;
754 }
755
756 if (p_first)
757 *p_first = first;
758
759 return retval;
760 }
761
762
763 static int aarch64_step_restart_smp(struct target *target)
764 {
765 int retval = ERROR_OK;
766 struct target_list *head;
767 struct target *first = NULL;
768
769 LOG_DEBUG("%s", target_name(target));
770
771 retval = aarch64_prep_restart_smp(target, 0, &first);
772 if (retval != ERROR_OK)
773 return retval;
774
775 if (first != NULL)
776 retval = aarch64_do_restart_one(first, RESTART_LAZY);
777 if (retval != ERROR_OK) {
778 LOG_DEBUG("error restarting target %s", target_name(first));
779 return retval;
780 }
781
782 int64_t then = timeval_ms();
783 for (;;) {
784 struct target *curr = target;
785 bool all_resumed = true;
786
787 foreach_smp_target(head, target->head) {
788 uint32_t prsr;
789 int resumed;
790
791 curr = head->target;
792
793 if (curr == target)
794 continue;
795
796 if (!target_was_examined(curr))
797 continue;
798
799 retval = aarch64_check_state_one(curr,
800 PRSR_SDR, PRSR_SDR, &resumed, &prsr);
801 if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
802 all_resumed = false;
803 break;
804 }
805
806 if (curr->state != TARGET_RUNNING) {
807 curr->state = TARGET_RUNNING;
808 curr->debug_reason = DBG_REASON_NOTHALTED;
809 target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
810 }
811 }
812
813 if (all_resumed)
814 break;
815
816 if (timeval_ms() > then + 1000) {
817 LOG_ERROR("%s: timeout waiting for target resume", __func__);
818 retval = ERROR_TARGET_TIMEOUT;
819 break;
820 }
		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTIs are not connected by a common
		 * trigger matrix. It seems that we need to kick one core in each
		 * cluster explicitly. So if we find that a core has not resumed
		 * yet, we trigger an explicit resume for the second cluster.
		 */
828 retval = aarch64_do_restart_one(curr, RESTART_LAZY);
829 if (retval != ERROR_OK)
830 break;
831 }
832
833 return retval;
834 }
835
836 static int aarch64_resume(struct target *target, int current,
837 target_addr_t address, int handle_breakpoints, int debug_execution)
838 {
839 int retval = 0;
840 uint64_t addr = address;
841
842 struct armv8_common *armv8 = target_to_armv8(target);
843 armv8->last_run_control_op = ARMV8_RUNCONTROL_RESUME;
844
845 if (target->state != TARGET_HALTED)
846 return ERROR_TARGET_NOT_HALTED;
847
	/*
	 * If this target is part of an SMP group, prepare the other
	 * targets for resuming. This involves restoring the complete
	 * target register context and setting up CTI gates to accept
	 * resume events from the trigger matrix.
	 */
854 if (target->smp) {
855 retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
856 if (retval != ERROR_OK)
857 return retval;
858 }
859
860 /* all targets prepared, restore and restart the current target */
861 retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
862 debug_execution);
863 if (retval == ERROR_OK)
864 retval = aarch64_restart_one(target, RESTART_SYNC);
865 if (retval != ERROR_OK)
866 return retval;
867
868 if (target->smp) {
869 int64_t then = timeval_ms();
870 for (;;) {
871 struct target *curr = target;
872 struct target_list *head;
873 bool all_resumed = true;
874
875 foreach_smp_target(head, target->head) {
876 uint32_t prsr;
877 int resumed;
878
879 curr = head->target;
880 if (curr == target)
881 continue;
882 if (!target_was_examined(curr))
883 continue;
884
885 retval = aarch64_check_state_one(curr,
886 PRSR_SDR, PRSR_SDR, &resumed, &prsr);
887 if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
888 all_resumed = false;
889 break;
890 }
891
892 if (curr->state != TARGET_RUNNING) {
893 curr->state = TARGET_RUNNING;
894 curr->debug_reason = DBG_REASON_NOTHALTED;
895 target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
896 }
897 }
898
899 if (all_resumed)
900 break;
901
902 if (timeval_ms() > then + 1000) {
903 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
904 retval = ERROR_TARGET_TIMEOUT;
905 break;
906 }
907
			/*
			 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
			 * and it looks like the CTIs are not connected by a common
			 * trigger matrix. It seems that we need to kick one core in each
			 * cluster explicitly. So if we find that a core has not resumed
			 * yet, we trigger an explicit resume for the second cluster.
			 */
915 retval = aarch64_do_restart_one(curr, RESTART_LAZY);
916 if (retval != ERROR_OK)
917 break;
918 }
919 }
920
921 if (retval != ERROR_OK)
922 return retval;
923
924 target->debug_reason = DBG_REASON_NOTHALTED;
925
926 if (!debug_execution) {
927 target->state = TARGET_RUNNING;
928 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
929 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
930 } else {
931 target->state = TARGET_DEBUG_RUNNING;
932 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
933 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
934 }
935
936 return ERROR_OK;
937 }
938
939 static int aarch64_debug_entry(struct target *target)
940 {
941 int retval = ERROR_OK;
942 struct armv8_common *armv8 = target_to_armv8(target);
943 struct arm_dpm *dpm = &armv8->dpm;
944 enum arm_state core_state;
945 uint32_t dscr;
946
947 /* make sure to clear all sticky errors */
948 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
949 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
950 if (retval == ERROR_OK)
951 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
952 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
953 if (retval == ERROR_OK)
954 retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
955
956 if (retval != ERROR_OK)
957 return retval;
958
959 LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);
960
961 dpm->dscr = dscr;
962 core_state = armv8_dpm_get_core_state(dpm);
963 armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
964 armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
965
966 /* close the CTI gate for all events */
967 if (retval == ERROR_OK)
968 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
969 /* discard async exceptions */
970 if (retval == ERROR_OK)
971 retval = dpm->instr_cpsr_sync(dpm);
972 if (retval != ERROR_OK)
973 return retval;
974
975 /* Examine debug reason */
976 armv8_dpm_report_dscr(dpm, dscr);
977
978 /* save address of instruction that triggered the watchpoint? */
979 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
980 uint32_t tmp;
981 uint64_t wfar = 0;
982
983 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
984 armv8->debug_base + CPUV8_DBG_WFAR1,
985 &tmp);
986 if (retval != ERROR_OK)
987 return retval;
988 wfar = tmp;
989 wfar = (wfar << 32);
990 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
991 armv8->debug_base + CPUV8_DBG_WFAR0,
992 &tmp);
993 if (retval != ERROR_OK)
994 return retval;
995 wfar |= tmp;
996 armv8_dpm_report_wfar(&armv8->dpm, wfar);
997 }
998
999 retval = armv8_dpm_read_current_registers(&armv8->dpm);
1000
1001 if (retval == ERROR_OK && armv8->post_debug_entry)
1002 retval = armv8->post_debug_entry(target);
1003
1004 return retval;
1005 }
1006
1007 static int aarch64_post_debug_entry(struct target *target)
1008 {
1009 struct aarch64_common *aarch64 = target_to_aarch64(target);
1010 struct armv8_common *armv8 = &aarch64->armv8_common;
1011 int retval;
1012 enum arm_mode target_mode = ARM_MODE_ANY;
1013 uint32_t instr;
1014
1015 switch (armv8->arm.core_mode) {
1016 case ARMV8_64_EL0T:
1017 target_mode = ARMV8_64_EL1H;
1018 /* fall through */
1019 case ARMV8_64_EL1T:
1020 case ARMV8_64_EL1H:
1021 instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
1022 break;
1023 case ARMV8_64_EL2T:
1024 case ARMV8_64_EL2H:
1025 instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
1026 break;
1027 case ARMV8_64_EL3H:
1028 case ARMV8_64_EL3T:
1029 instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
1030 break;
1031
1032 case ARM_MODE_SVC:
1033 case ARM_MODE_ABT:
1034 case ARM_MODE_FIQ:
1035 case ARM_MODE_IRQ:
1036 case ARM_MODE_SYS:
1037 instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1038 break;
1039
1040 default:
1041 LOG_INFO("cannot read system control register in this mode");
1042 return ERROR_FAIL;
1043 }
1044
1045 if (target_mode != ARM_MODE_ANY)
1046 armv8_dpm_modeswitch(&armv8->dpm, target_mode);
1047
1048 retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
1049 if (retval != ERROR_OK)
1050 return retval;
1051
1052 if (target_mode != ARM_MODE_ANY)
1053 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
1054
1055 LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
1056 aarch64->system_control_reg_curr = aarch64->system_control_reg;
1057
1058 if (armv8->armv8_mmu.armv8_cache.info == -1) {
1059 armv8_identify_cache(armv8);
1060 armv8_read_mpidr(armv8);
1061 }
1062
1063 armv8->armv8_mmu.mmu_enabled =
1064 (aarch64->system_control_reg & 0x1U) ? 1 : 0;
1065 armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
1066 (aarch64->system_control_reg & 0x4U) ? 1 : 0;
1067 armv8->armv8_mmu.armv8_cache.i_cache_enabled =
1068 (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
1069 return ERROR_OK;
1070 }
1071
1072 /*
1073 * single-step a target
1074 */
1075 static int aarch64_step(struct target *target, int current, target_addr_t address,
1076 int handle_breakpoints)
1077 {
1078 struct armv8_common *armv8 = target_to_armv8(target);
1079 struct aarch64_common *aarch64 = target_to_aarch64(target);
1080 int saved_retval = ERROR_OK;
1081 int retval;
1082 uint32_t edecr;
1083
1084 armv8->last_run_control_op = ARMV8_RUNCONTROL_STEP;
1085
1086 if (target->state != TARGET_HALTED) {
1087 LOG_WARNING("target not halted");
1088 return ERROR_TARGET_NOT_HALTED;
1089 }
1090
1091 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1092 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1093 /* make sure EDECR.SS is not set when restoring the register */
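	/* (EDECR.SS is bit 2, the halting step enable) */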
1094
1095 if (retval == ERROR_OK) {
1096 edecr &= ~0x4;
1097 /* set EDECR.SS to enter hardware step mode */
1098 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1099 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1100 }
1101 /* disable interrupts while stepping */
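	/* bits [23:22] of EDSCR form the interrupt disable field (EDSCR.INTdis) */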
1102 if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
1103 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1104 /* bail out if stepping setup has failed */
1105 if (retval != ERROR_OK)
1106 return retval;
1107
1108 if (target->smp && (current == 1)) {
1109 /*
1110 * isolate current target so that it doesn't get resumed
1111 * together with the others
1112 */
1113 retval = arm_cti_gate_channel(armv8->cti, 1);
1114 /* resume all other targets in the group */
1115 if (retval == ERROR_OK)
1116 retval = aarch64_step_restart_smp(target);
1117 if (retval != ERROR_OK) {
1118 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1119 return retval;
1120 }
1121 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1122 }
1123
1124 /* all other targets running, restore and restart the current target */
1125 retval = aarch64_restore_one(target, current, &address, 0, 0);
1126 if (retval == ERROR_OK)
1127 retval = aarch64_restart_one(target, RESTART_LAZY);
1128
1129 if (retval != ERROR_OK)
1130 return retval;
1131
1132 LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
1133 if (!handle_breakpoints)
1134 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1135
1136 int64_t then = timeval_ms();
1137 for (;;) {
1138 int stepped;
1139 uint32_t prsr;
1140
1141 retval = aarch64_check_state_one(target,
1142 PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
1143 if (retval != ERROR_OK || stepped)
1144 break;
1145
1146 if (timeval_ms() > then + 100) {
1147 LOG_ERROR("timeout waiting for target %s halt after step",
1148 target_name(target));
1149 retval = ERROR_TARGET_TIMEOUT;
1150 break;
1151 }
1152 }
1153
	/*
	 * At least on one SoC (Renesas R8A7795), stepping over a WFI instruction
	 * causes a timeout. The core takes the step but doesn't complete it and so
	 * debug state is never entered. However, you can manually halt the core,
	 * as an external debug request event is also a WFI wake-up event.
	 */
1160 if (retval == ERROR_TARGET_TIMEOUT)
1161 saved_retval = aarch64_halt_one(target, HALT_SYNC);
1162
1163 /* restore EDECR */
1164 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1165 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1166 if (retval != ERROR_OK)
1167 return retval;
1168
1169 /* restore interrupts */
1170 if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
1171 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1172 if (retval != ERROR_OK)
			return retval;
1174 }
1175
1176 if (saved_retval != ERROR_OK)
1177 return saved_retval;
1178
1179 return aarch64_poll(target);
1180 }
1181
1182 static int aarch64_restore_context(struct target *target, bool bpwp)
1183 {
1184 struct armv8_common *armv8 = target_to_armv8(target);
1185 struct arm *arm = &armv8->arm;
1186
1187 int retval;
1188
1189 LOG_DEBUG("%s", target_name(target));
1190
1191 if (armv8->pre_restore_context)
1192 armv8->pre_restore_context(target);
1193
1194 retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1195 if (retval == ERROR_OK) {
1196 /* registers are now invalid */
1197 register_cache_invalidate(arm->core_cache);
1198 register_cache_invalidate(arm->core_cache->next);
1199 }
1200
1201 return retval;
1202 }
1203
/*
 * AArch64 breakpoint and watchpoint functions
 */
1207
1208 /* Setup hardware Breakpoint Register Pair */
1209 static int aarch64_set_breakpoint(struct target *target,
1210 struct breakpoint *breakpoint, uint8_t matchmode)
1211 {
1212 int retval;
1213 int brp_i = 0;
1214 uint32_t control;
1215 uint8_t byte_addr_select = 0x0F;
1216 struct aarch64_common *aarch64 = target_to_aarch64(target);
1217 struct armv8_common *armv8 = &aarch64->armv8_common;
1218 struct aarch64_brp *brp_list = aarch64->brp_list;
1219
1220 if (breakpoint->set) {
1221 LOG_WARNING("breakpoint already set");
1222 return ERROR_OK;
1223 }
1224
1225 if (breakpoint->type == BKPT_HARD) {
1226 int64_t bpt_value;
		while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
			brp_i++;
1229 if (brp_i >= aarch64->brp_num) {
			LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1231 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1232 }
1233 breakpoint->set = brp_i + 1;
1234 if (breakpoint->length == 2)
1235 byte_addr_select = (3 << (breakpoint->address & 0x02));
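		/* DBGBCR layout: BT[23:20] = matchmode, HMC[13] = 1,
		 * BAS[12:5] = byte_addr_select, PMC[2:1] = 0b11, E[0] = 1 */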
1236 control = ((matchmode & 0x7) << 20)
1237 | (1 << 13)
1238 | (byte_addr_select << 5)
1239 | (3 << 1) | 1;
1240 brp_list[brp_i].used = 1;
1241 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1242 brp_list[brp_i].control = control;
1243 bpt_value = brp_list[brp_i].value;
1244
1245 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1246 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1247 (uint32_t)(bpt_value & 0xFFFFFFFF));
1248 if (retval != ERROR_OK)
1249 return retval;
1250 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1251 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1252 (uint32_t)(bpt_value >> 32));
1253 if (retval != ERROR_OK)
1254 return retval;
1255
1256 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1257 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1258 brp_list[brp_i].control);
1259 if (retval != ERROR_OK)
1260 return retval;
1261 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1262 brp_list[brp_i].control,
1263 brp_list[brp_i].value);
1264
1265 } else if (breakpoint->type == BKPT_SOFT) {
1266 uint8_t code[4];
1267
1268 buf_set_u32(code, 0, 32, armv8_opcode(armv8, ARMV8_OPC_HLT));
1269 retval = target_read_memory(target,
1270 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1271 breakpoint->length, 1,
1272 breakpoint->orig_instr);
1273 if (retval != ERROR_OK)
1274 return retval;
1275
1276 armv8_cache_d_inner_flush_virt(armv8,
1277 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1278 breakpoint->length);
1279
1280 retval = target_write_memory(target,
1281 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1282 breakpoint->length, 1, code);
1283 if (retval != ERROR_OK)
1284 return retval;
1285
1286 armv8_cache_d_inner_flush_virt(armv8,
1287 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1288 breakpoint->length);
1289
1290 armv8_cache_i_inner_inval_virt(armv8,
1291 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1292 breakpoint->length);
1293
1294 breakpoint->set = 0x11; /* Any nice value but 0 */
1295 }
1296
	/* Ensure that halting debug mode is enabled */
1298 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1299 if (retval != ERROR_OK) {
1300 LOG_DEBUG("Failed to set DSCR.HDE");
1301 return retval;
1302 }
1303
1304 return ERROR_OK;
1305 }
1306
1307 static int aarch64_set_context_breakpoint(struct target *target,
1308 struct breakpoint *breakpoint, uint8_t matchmode)
1309 {
1310 int retval = ERROR_FAIL;
1311 int brp_i = 0;
1312 uint32_t control;
1313 uint8_t byte_addr_select = 0x0F;
1314 struct aarch64_common *aarch64 = target_to_aarch64(target);
1315 struct armv8_common *armv8 = &aarch64->armv8_common;
1316 struct aarch64_brp *brp_list = aarch64->brp_list;
1317
1318 if (breakpoint->set) {
1319 LOG_WARNING("breakpoint already set");
1320 return retval;
1321 }
1322 /*check available context BRPs*/
	while ((brp_i < aarch64->brp_num) &&
		(brp_list[brp_i].used || (brp_list[brp_i].type != BRP_CONTEXT)))
		brp_i++;
1326
1327 if (brp_i >= aarch64->brp_num) {
		LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1329 return ERROR_FAIL;
1330 }
1331
1332 breakpoint->set = brp_i + 1;
1333 control = ((matchmode & 0x7) << 20)
1334 | (1 << 13)
1335 | (byte_addr_select << 5)
1336 | (3 << 1) | 1;
1337 brp_list[brp_i].used = 1;
1338 brp_list[brp_i].value = (breakpoint->asid);
1339 brp_list[brp_i].control = control;
1340 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1341 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1342 brp_list[brp_i].value);
1343 if (retval != ERROR_OK)
1344 return retval;
1345 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1346 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1347 brp_list[brp_i].control);
1348 if (retval != ERROR_OK)
1349 return retval;
1350 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1351 brp_list[brp_i].control,
1352 brp_list[brp_i].value);
1353 return ERROR_OK;
1354
1355 }
1356
1357 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1358 {
1359 int retval = ERROR_FAIL;
1360 int brp_1 = 0; /* holds the contextID pair */
1361 int brp_2 = 0; /* holds the IVA pair */
1362 uint32_t control_CTX, control_IVA;
1363 uint8_t CTX_byte_addr_select = 0x0F;
1364 uint8_t IVA_byte_addr_select = 0x0F;
	uint8_t CTX_matchmode = 0x03;
	uint8_t IVA_matchmode = 0x01;
1367 struct aarch64_common *aarch64 = target_to_aarch64(target);
1368 struct armv8_common *armv8 = &aarch64->armv8_common;
1369 struct aarch64_brp *brp_list = aarch64->brp_list;
1370
1371 if (breakpoint->set) {
1372 LOG_WARNING("breakpoint already set");
1373 return retval;
1374 }
1375 /*check available context BRPs*/
	while ((brp_1 < aarch64->brp_num) &&
		(brp_list[brp_1].used || (brp_list[brp_1].type != BRP_CONTEXT)))
		brp_1++;
1379
	LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1381 if (brp_1 >= aarch64->brp_num) {
		LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1383 return ERROR_FAIL;
1384 }
1385
	while ((brp_2 < aarch64->brp_num) &&
		(brp_list[brp_2].used || (brp_list[brp_2].type != BRP_NORMAL)))
		brp_2++;
1389
	LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1391 if (brp_2 >= aarch64->brp_num) {
		LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1393 return ERROR_FAIL;
1394 }
1395
1396 breakpoint->set = brp_1 + 1;
1397 breakpoint->linked_BRP = brp_2;
	control_CTX = ((CTX_matchmode & 0x7) << 20)
1399 | (brp_2 << 16)
1400 | (0 << 14)
1401 | (CTX_byte_addr_select << 5)
1402 | (3 << 1) | 1;
1403 brp_list[brp_1].used = 1;
1404 brp_list[brp_1].value = (breakpoint->asid);
1405 brp_list[brp_1].control = control_CTX;
1406 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1407 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1408 brp_list[brp_1].value);
1409 if (retval != ERROR_OK)
1410 return retval;
1411 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1412 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1413 brp_list[brp_1].control);
1414 if (retval != ERROR_OK)
1415 return retval;
1416
	control_IVA = ((IVA_matchmode & 0x7) << 20)
1418 | (brp_1 << 16)
1419 | (1 << 13)
1420 | (IVA_byte_addr_select << 5)
1421 | (3 << 1) | 1;
1422 brp_list[brp_2].used = 1;
1423 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1424 brp_list[brp_2].control = control_IVA;
1425 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1426 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1427 brp_list[brp_2].value & 0xFFFFFFFF);
1428 if (retval != ERROR_OK)
1429 return retval;
1430 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1431 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1432 brp_list[brp_2].value >> 32);
1433 if (retval != ERROR_OK)
1434 return retval;
1435 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1436 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1437 brp_list[brp_2].control);
1438 if (retval != ERROR_OK)
1439 return retval;
1440
1441 return ERROR_OK;
1442 }
1443
1444 static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1445 {
1446 int retval;
1447 struct aarch64_common *aarch64 = target_to_aarch64(target);
1448 struct armv8_common *armv8 = &aarch64->armv8_common;
1449 struct aarch64_brp *brp_list = aarch64->brp_list;
1450
1451 if (!breakpoint->set) {
1452 LOG_WARNING("breakpoint not set");
1453 return ERROR_OK;
1454 }
1455
1456 if (breakpoint->type == BKPT_HARD) {
1457 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1458 int brp_i = breakpoint->set - 1;
1459 int brp_j = breakpoint->linked_BRP;
1460 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1461 LOG_DEBUG("Invalid BRP number in breakpoint");
1462 return ERROR_OK;
1463 }
1464 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1465 brp_list[brp_i].control, brp_list[brp_i].value);
1466 brp_list[brp_i].used = 0;
1467 brp_list[brp_i].value = 0;
1468 brp_list[brp_i].control = 0;
1469 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1470 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1471 brp_list[brp_i].control);
1472 if (retval != ERROR_OK)
1473 return retval;
1474 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1475 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1476 (uint32_t)brp_list[brp_i].value);
1477 if (retval != ERROR_OK)
1478 return retval;
1479 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1480 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1481 (uint32_t)brp_list[brp_i].value);
1482 if (retval != ERROR_OK)
1483 return retval;
1484 if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
1485 LOG_DEBUG("Invalid BRP number in breakpoint");
1486 return ERROR_OK;
1487 }
1488 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
1489 brp_list[brp_j].control, brp_list[brp_j].value);
1490 brp_list[brp_j].used = 0;
1491 brp_list[brp_j].value = 0;
1492 brp_list[brp_j].control = 0;
1493 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1494 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
1495 brp_list[brp_j].control);
1496 if (retval != ERROR_OK)
1497 return retval;
1498 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1499 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
1500 (uint32_t)brp_list[brp_j].value);
1501 if (retval != ERROR_OK)
1502 return retval;
1503 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1504 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
1505 (uint32_t)brp_list[brp_j].value);
1506 if (retval != ERROR_OK)
1507 return retval;
1508
1509 breakpoint->linked_BRP = 0;
1510 breakpoint->set = 0;
1511 return ERROR_OK;
1512
1513 } else {
1514 int brp_i = breakpoint->set - 1;
1515 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1516 LOG_DEBUG("Invalid BRP number in breakpoint");
1517 return ERROR_OK;
1518 }
1519 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
1520 brp_list[brp_i].control, brp_list[brp_i].value);
1521 brp_list[brp_i].used = 0;
1522 brp_list[brp_i].value = 0;
1523 brp_list[brp_i].control = 0;
1524 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1525 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1526 brp_list[brp_i].control);
1527 if (retval != ERROR_OK)
1528 return retval;
1529 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1530 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1531 brp_list[brp_i].value);
1532 if (retval != ERROR_OK)
1533 return retval;
1534
1535 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1536 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1537 (uint32_t)brp_list[brp_i].value);
1538 if (retval != ERROR_OK)
1539 return retval;
1540 breakpoint->set = 0;
1541 return ERROR_OK;
1542 }
1543 } else {
1544 /* restore original instruction (kept in target endianness) */
1545
1546 armv8_cache_d_inner_flush_virt(armv8,
1547 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1548 breakpoint->length);
1549
1550 if (breakpoint->length == 4) {
1551 retval = target_write_memory(target,
1552 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1553 4, 1, breakpoint->orig_instr);
1554 if (retval != ERROR_OK)
1555 return retval;
1556 } else {
1557 retval = target_write_memory(target,
1558 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1559 2, 1, breakpoint->orig_instr);
1560 if (retval != ERROR_OK)
1561 return retval;
1562 }
1563
1564 armv8_cache_d_inner_flush_virt(armv8,
1565 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1566 breakpoint->length);
1567
1568 armv8_cache_i_inner_inval_virt(armv8,
1569 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1570 breakpoint->length);
1571 }
1572 breakpoint->set = 0;
1573
1574 return ERROR_OK;
1575 }
1576
1577 static int aarch64_add_breakpoint(struct target *target,
1578 struct breakpoint *breakpoint)
1579 {
1580 struct aarch64_common *aarch64 = target_to_aarch64(target);
1581
1582 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1583 LOG_INFO("no hardware breakpoint available");
1584 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1585 }
1586
1587 if (breakpoint->type == BKPT_HARD)
1588 aarch64->brp_num_available--;
1589
1590 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1591 }
1592
1593 static int aarch64_add_context_breakpoint(struct target *target,
1594 struct breakpoint *breakpoint)
1595 {
1596 struct aarch64_common *aarch64 = target_to_aarch64(target);
1597
1598 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1599 LOG_INFO("no hardware breakpoint available");
1600 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1601 }
1602
1603 if (breakpoint->type == BKPT_HARD)
1604 aarch64->brp_num_available--;
1605
1606 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1607 }
1608
1609 static int aarch64_add_hybrid_breakpoint(struct target *target,
1610 struct breakpoint *breakpoint)
1611 {
1612 struct aarch64_common *aarch64 = target_to_aarch64(target);
1613
1614 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1615 LOG_INFO("no hardware breakpoint available");
1616 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1617 }
1618
1619 if (breakpoint->type == BKPT_HARD)
1620 aarch64->brp_num_available--;
1621
1622 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1623 }
1624
1625
1626 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1627 {
1628 struct aarch64_common *aarch64 = target_to_aarch64(target);
1629
1630 #if 0
1631 /* It is perfectly possible to remove breakpoints while the target is running */
1632 if (target->state != TARGET_HALTED) {
1633 LOG_WARNING("target not halted");
1634 return ERROR_TARGET_NOT_HALTED;
1635 }
1636 #endif
1637
1638 if (breakpoint->set) {
1639 aarch64_unset_breakpoint(target, breakpoint);
1640 if (breakpoint->type == BKPT_HARD)
1641 aarch64->brp_num_available++;
1642 }
1643
1644 return ERROR_OK;
1645 }
1646
/*
 * AArch64 reset functions
 */
1650
1651 static int aarch64_assert_reset(struct target *target)
1652 {
1653 struct armv8_common *armv8 = target_to_armv8(target);
1654
1655 LOG_DEBUG(" ");
1656
1657 /* FIXME when halt is requested, make it work somehow... */
1658
1659 /* Issue some kind of warm reset. */
1660 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1661 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1662 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1663 /* REVISIT handle "pulls" cases, if there's
1664 * hardware that needs them to work.
1665 */
1666 adapter_assert_reset();
1667 } else {
1668 LOG_ERROR("%s: how to reset?", target_name(target));
1669 return ERROR_FAIL;
1670 }
1671
1672 /* registers are now invalid */
1673 if (target_was_examined(target)) {
1674 register_cache_invalidate(armv8->arm.core_cache);
1675 register_cache_invalidate(armv8->arm.core_cache->next);
1676 }
1677
1678 target->state = TARGET_RESET;
1679
1680 return ERROR_OK;
1681 }
1682
1683 static int aarch64_deassert_reset(struct target *target)
1684 {
1685 int retval;
1686
1687 LOG_DEBUG(" ");
1688
1689 /* be certain SRST is off */
1690 adapter_deassert_reset();
1691
1692 if (!target_was_examined(target))
1693 return ERROR_OK;
1694
1695 retval = aarch64_poll(target);
1696 if (retval != ERROR_OK)
1697 return retval;
1698
1699 retval = aarch64_init_debug_access(target);
1700 if (retval != ERROR_OK)
1701 return retval;
1702
1703 if (target->reset_halt) {
1704 if (target->state != TARGET_HALTED) {
1705 LOG_WARNING("%s: ran after reset and before halt ...",
1706 target_name(target));
1707 retval = target_halt(target);
1708 }
1709 }
1710
1711 return retval;
1712 }
1713
1714 static int aarch64_write_cpu_memory_slow(struct target *target,
1715 uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1716 {
1717 struct armv8_common *armv8 = target_to_armv8(target);
1718 struct arm_dpm *dpm = &armv8->dpm;
1719 struct arm *arm = &armv8->arm;
1720 int retval;
1721
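	/*
	 * In the loop below, x1 is clobbered as the data register: each element is
	 * moved from DTRRX into x1 and then stored through x0, which holds the
	 * address (the caller marked x0 dirty) and is post-incremented.
	 */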
1722 armv8_reg_current(arm, 1)->dirty = true;
1723
1724 /* change DCC to normal mode if necessary */
1725 if (*dscr & DSCR_MA) {
1726 *dscr &= ~DSCR_MA;
1727 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1728 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1729 if (retval != ERROR_OK)
1730 return retval;
1731 }
1732
1733 while (count) {
1734 uint32_t data, opcode;
1735
1736 /* write the data to store into DTRRX */
1737 if (size == 1)
1738 data = *buffer;
1739 else if (size == 2)
1740 data = target_buffer_get_u16(target, buffer);
1741 else
1742 data = target_buffer_get_u32(target, buffer);
1743 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1744 armv8->debug_base + CPUV8_DBG_DTRRX, data);
1745 if (retval != ERROR_OK)
1746 return retval;
1747
1748 if (arm->core_state == ARM_STATE_AARCH64)
1749 retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
1750 else
1751 retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
1752 if (retval != ERROR_OK)
1753 return retval;
1754
1755 if (size == 1)
1756 opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
1757 else if (size == 2)
1758 opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
1759 else
1760 opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
1761 retval = dpm->instr_execute(dpm, opcode);
1762 if (retval != ERROR_OK)
1763 return retval;
1764
1765 /* Advance */
1766 buffer += size;
1767 --count;
1768 }
1769
1770 return ERROR_OK;
1771 }
1772
1773 static int aarch64_write_cpu_memory_fast(struct target *target,
1774 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1775 {
1776 struct armv8_common *armv8 = target_to_armv8(target);
1777 struct arm *arm = &armv8->arm;
1778 int retval;
1779
1780 armv8_reg_current(arm, 1)->dirty = true;
1781
1782 /* Step 1.d - Change DCC to memory mode */
1783 *dscr |= DSCR_MA;
1784 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1785 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1786 if (retval != ERROR_OK)
1787 return retval;
1788
1789
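	/*
	 * While EDSCR.MA is set, each write to DTRRX makes the PE perform an
	 * implicit store of that word and advance the address held in x0, so a
	 * non-incrementing buffer write transfers the whole block.
	 */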
1790 /* Step 2.a - Do the write */
1791 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1792 buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
1793 if (retval != ERROR_OK)
1794 return retval;
1795
1796 /* Step 3.a - Switch DTR mode back to Normal mode */
1797 *dscr &= ~DSCR_MA;
1798 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1799 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1800 if (retval != ERROR_OK)
1801 return retval;
1802
1803 return ERROR_OK;
1804 }
1805
1806 static int aarch64_write_cpu_memory(struct target *target,
1807 uint64_t address, uint32_t size,
1808 uint32_t count, const uint8_t *buffer)
1809 {
1810 /* write memory through APB-AP */
1811 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1812 struct armv8_common *armv8 = target_to_armv8(target);
1813 struct arm_dpm *dpm = &armv8->dpm;
1814 struct arm *arm = &armv8->arm;
1815 uint32_t dscr;
1816
1817 if (target->state != TARGET_HALTED) {
1818 LOG_WARNING("target not halted");
1819 return ERROR_TARGET_NOT_HALTED;
1820 }
1821
1822 /* Mark register X0 as dirty, as it will be used
1823 * for transferring the data.
1824 * It will be restored automatically when exiting
1825 * debug mode
1826 */
1827 armv8_reg_current(arm, 0)->dirty = true;
1828
1829 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
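	/*
	 * In outline: write the start address into x0 through the DCC, then
	 * either stream 32-bit words through DTRRX with the DCC in memory
	 * access mode (fast path), or execute one store per element with the
	 * DCC in normal mode (slow path for unaligned or sub-word accesses).
	 */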
1830
1831 /* Read DSCR */
1832 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1833 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1834 if (retval != ERROR_OK)
1835 return retval;
1836
1837 /* Set Normal access mode */
1838 dscr = (dscr & ~DSCR_MA);
1839 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1840 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1841 if (retval != ERROR_OK)
1842 return retval;
1843
1844 if (arm->core_state == ARM_STATE_AARCH64) {
1845 /* Write X0 with value 'address' using write procedure */
1846 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1847 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1848 retval = dpm->instr_write_data_dcc_64(dpm,
1849 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
1850 } else {
1851 /* Write R0 with value 'address' using write procedure */
1852 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1853 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1854 retval = dpm->instr_write_data_dcc(dpm,
1855 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
1856 }
1857
1858 if (retval != ERROR_OK)
1859 return retval;
1860
1861 if (size == 4 && (address % 4) == 0)
1862 retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
1863 else
1864 retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);
1865
1866 if (retval != ERROR_OK) {
1867 /* Unset DTR mode */
1868 mem_ap_read_atomic_u32(armv8->debug_ap,
1869 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1870 dscr &= ~DSCR_MA;
1871 mem_ap_write_atomic_u32(armv8->debug_ap,
1872 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1873 }
1874
1875 /* Check for sticky abort flags in the DSCR */
1876 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1877 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1878 if (retval != ERROR_OK)
1879 return retval;
1880
1881 dpm->dscr = dscr;
1882 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1883 /* Abort occurred - clear it and exit */
1884 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1885 armv8_dpm_handle_exception(dpm, true);
1886 return ERROR_FAIL;
1887 }
1888
1889 /* Done */
1890 return ERROR_OK;
1891 }
1892
1893 static int aarch64_read_cpu_memory_slow(struct target *target,
1894 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
1895 {
1896 struct armv8_common *armv8 = target_to_armv8(target);
1897 struct arm_dpm *dpm = &armv8->dpm;
1898 struct arm *arm = &armv8->arm;
1899 int retval;
1900
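	/* the load opcodes below fetch into W1/R1 and post-increment the address
	 * held in X0/R0, so R1 must be marked dirty */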
1901 armv8_reg_current(arm, 1)->dirty = true;
1902
1903 /* change DCC to normal mode (if necessary) */
1904 if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
1906 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1907 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1908 if (retval != ERROR_OK)
1909 return retval;
1910 }
1911
1912 while (count) {
1913 uint32_t opcode, data;
1914
1915 if (size == 1)
1916 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
1917 else if (size == 2)
1918 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
1919 else
1920 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
1921 retval = dpm->instr_execute(dpm, opcode);
1922 if (retval != ERROR_OK)
1923 return retval;
1924
1925 if (arm->core_state == ARM_STATE_AARCH64)
1926 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
1927 else
1928 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1929 if (retval != ERROR_OK)
1930 return retval;
1931
1932 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1933 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
1934 if (retval != ERROR_OK)
1935 return retval;
1936
1937 if (size == 1)
1938 *buffer = (uint8_t)data;
1939 else if (size == 2)
1940 target_buffer_set_u16(target, buffer, (uint16_t)data);
1941 else
1942 target_buffer_set_u32(target, buffer, data);
1943
1944 /* Advance */
1945 buffer += size;
1946 --count;
1947 }
1948
1949 return ERROR_OK;
1950 }
1951
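/*
 * Fast path for bulk reads, following the block transfer procedure of
 * DDI0487A.g, chapter J9.1: with DSCR.MA set, every read of DBGDTRTX makes
 * the core execute "ldr w1, [x0], #4" and reload DTRTX, so a whole block is
 * streamed with one MEM-AP read per word. The first DTRTX read only primes
 * the pipeline and its value is discarded below.
 */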
1952 static int aarch64_read_cpu_memory_fast(struct target *target,
1953 uint32_t count, uint8_t *buffer, uint32_t *dscr)
1954 {
1955 struct armv8_common *armv8 = target_to_armv8(target);
1956 struct arm_dpm *dpm = &armv8->dpm;
1957 struct arm *arm = &armv8->arm;
1958 int retval;
1959 uint32_t value;
1960
1961 /* Mark X1 as dirty */
1962 armv8_reg_current(arm, 1)->dirty = true;
1963
1964 if (arm->core_state == ARM_STATE_AARCH64) {
1965 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1966 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
1967 } else {
1968 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1969 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1970 }
1971
1972 if (retval != ERROR_OK)
1973 return retval;
1974
1975 /* Step 1.e - Change DCC to memory mode */
1976 *dscr |= DSCR_MA;
1977 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1978 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1979 if (retval != ERROR_OK)
1980 return retval;
1981
1982 /* Step 1.f - read DBGDTRTX and discard the value */
1983 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1984 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1985 if (retval != ERROR_OK)
1986 return retval;
1987
1988 count--;
	/* Read the data. Each read of the DTRTX register causes the instruction
	 * to be reissued, and the abort flags are sticky, so they can be checked
	 * once at the end of the whole transfer.
	 *
	 * The data is transferred as 32-bit aligned words.
	 */
1994
1995 if (count) {
1996 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1997 * increments X0 by 4. */
1998 retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
1999 armv8->debug_base + CPUV8_DBG_DTRTX);
2000 if (retval != ERROR_OK)
2001 return retval;
2002 }
2003
2004 /* Step 3.a - set DTR access mode back to Normal mode */
2005 *dscr &= ~DSCR_MA;
2006 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2007 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2008 if (retval != ERROR_OK)
2009 return retval;
2010
2011 /* Step 3.b - read DBGDTRTX for the final value */
2012 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2013 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2014 if (retval != ERROR_OK)
2015 return retval;
2016
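	/* 'count' was pre-decremented above, so this is the last word of the
	 * caller's buffer */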
2017 target_buffer_set_u32(target, buffer + count * 4, value);
2018 return retval;
2019 }
2020
2021 static int aarch64_read_cpu_memory(struct target *target,
2022 target_addr_t address, uint32_t size,
2023 uint32_t count, uint8_t *buffer)
2024 {
2025 /* read memory through APB-AP */
2026 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2027 struct armv8_common *armv8 = target_to_armv8(target);
2028 struct arm_dpm *dpm = &armv8->dpm;
2029 struct arm *arm = &armv8->arm;
2030 uint32_t dscr;
2031
	LOG_DEBUG("Reading CPU memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count %" PRIu32,
		address, size, count);
2034
2035 if (target->state != TARGET_HALTED) {
2036 LOG_WARNING("target not halted");
2037 return ERROR_TARGET_NOT_HALTED;
2038 }
2039
2040 /* Mark register X0 as dirty, as it will be used
2041 * for transferring the data.
2042 * It will be restored automatically when exiting
2043 * debug mode
2044 */
2045 armv8_reg_current(arm, 0)->dirty = true;
2046
2047 /* Read DSCR */
2048 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2049 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2050 if (retval != ERROR_OK)
2051 return retval;
2052
2053 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2054
2055 /* Set Normal access mode */
2056 dscr &= ~DSCR_MA;
2057 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2058 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2059 if (retval != ERROR_OK)
2060 return retval;
2061
2062 if (arm->core_state == ARM_STATE_AARCH64) {
2063 /* Write X0 with value 'address' using write procedure */
2064 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c   - Copy value from DTR to X0 using instruction mrs x0, DBGDTR_EL0 */
2066 retval = dpm->instr_write_data_dcc_64(dpm,
2067 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
2068 } else {
2069 /* Write R0 with value 'address' using write procedure */
2070 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c   - Copy value from DTR to R0 using instruction mrc p14, 0, r0, c0, c5, 0 (DBGDTRRXint) */
2072 retval = dpm->instr_write_data_dcc(dpm,
2073 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
2074 }
2075
2076 if (retval != ERROR_OK)
2077 return retval;
2078
2079 if (size == 4 && (address % 4) == 0)
2080 retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
2081 else
2082 retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2083
2084 if (dscr & DSCR_MA) {
2085 dscr &= ~DSCR_MA;
2086 mem_ap_write_atomic_u32(armv8->debug_ap,
2087 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2088 }
2089
2090 if (retval != ERROR_OK)
2091 return retval;
2092
2093 /* Check for sticky abort flags in the DSCR */
2094 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2095 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2096 if (retval != ERROR_OK)
2097 return retval;
2098
2099 dpm->dscr = dscr;
2100
2101 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2102 /* Abort occurred - clear it and exit */
2103 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2104 armv8_dpm_handle_exception(dpm, true);
2105 return ERROR_FAIL;
2106 }
2107
2108 /* Done */
2109 return ERROR_OK;
2110 }
2111
2112 static int aarch64_read_phys_memory(struct target *target,
2113 target_addr_t address, uint32_t size,
2114 uint32_t count, uint8_t *buffer)
2115 {
2116 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2117
2118 if (count && buffer) {
2119 /* read memory through APB-AP */
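		/* temporarily disable the MMU so the access goes to the physical
		 * address; aarch64_read_memory() re-enables it for virtual access */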
2120 retval = aarch64_mmu_modify(target, 0);
2121 if (retval != ERROR_OK)
2122 return retval;
2123 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2124 }
2125 return retval;
2126 }
2127
2128 static int aarch64_read_memory(struct target *target, target_addr_t address,
2129 uint32_t size, uint32_t count, uint8_t *buffer)
2130 {
2131 int mmu_enabled = 0;
2132 int retval;
2133
2134 /* determine if MMU was enabled on target stop */
2135 retval = aarch64_mmu(target, &mmu_enabled);
2136 if (retval != ERROR_OK)
2137 return retval;
2138
2139 if (mmu_enabled) {
2140 /* enable MMU as we could have disabled it for phys access */
2141 retval = aarch64_mmu_modify(target, 1);
2142 if (retval != ERROR_OK)
2143 return retval;
2144 }
2145 return aarch64_read_cpu_memory(target, address, size, count, buffer);
2146 }
2147
2148 static int aarch64_write_phys_memory(struct target *target,
2149 target_addr_t address, uint32_t size,
2150 uint32_t count, const uint8_t *buffer)
2151 {
2152 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2153
2154 if (count && buffer) {
2155 /* write memory through APB-AP */
2156 retval = aarch64_mmu_modify(target, 0);
2157 if (retval != ERROR_OK)
2158 return retval;
2159 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2160 }
2161
2162 return retval;
2163 }
2164
2165 static int aarch64_write_memory(struct target *target, target_addr_t address,
2166 uint32_t size, uint32_t count, const uint8_t *buffer)
2167 {
2168 int mmu_enabled = 0;
2169 int retval;
2170
2171 /* determine if MMU was enabled on target stop */
2172 retval = aarch64_mmu(target, &mmu_enabled);
2173 if (retval != ERROR_OK)
2174 return retval;
2175
2176 if (mmu_enabled) {
2177 /* enable MMU as we could have disabled it for phys access */
2178 retval = aarch64_mmu_modify(target, 1);
2179 if (retval != ERROR_OK)
2180 return retval;
2181 }
2182 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2183 }
2184
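/*
 * Periodic timer callback: while the target is running, drain any debug
 * messages the firmware pushes through the DCC channel (DBGDTRTX) and hand
 * them to the generic target_request() layer.
 */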
2185 static int aarch64_handle_target_request(void *priv)
2186 {
2187 struct target *target = priv;
2188 struct armv8_common *armv8 = target_to_armv8(target);
2189 int retval;
2190
2191 if (!target_was_examined(target))
2192 return ERROR_OK;
2193 if (!target->dbg_msg_enabled)
2194 return ERROR_OK;
2195
2196 if (target->state == TARGET_RUNNING) {
2197 uint32_t request;
2198 uint32_t dscr;
2199 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2200 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2201
2202 /* check if we have data */
2203 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2204 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2205 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2206 if (retval == ERROR_OK) {
2207 target_request(target, request);
2208 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2209 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2210 }
2211 }
2212 }
2213
2214 return ERROR_OK;
2215 }
2216
2217 static int aarch64_examine_first(struct target *target)
2218 {
2219 struct aarch64_common *aarch64 = target_to_aarch64(target);
2220 struct armv8_common *armv8 = &aarch64->armv8_common;
2221 struct adiv5_dap *swjdp = armv8->arm.dap;
2222 struct aarch64_private_config *pc;
2223 int i;
2224 int retval = ERROR_OK;
2225 uint64_t debug, ttypr;
2226 uint32_t cpuid;
2227 uint32_t tmp0, tmp1, tmp2, tmp3;
2228 debug = ttypr = cpuid = 0;
2229
	/* Search for the APB-AP - it is needed for access to debug registers */
2231 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2232 if (retval != ERROR_OK) {
2233 LOG_ERROR("Could not find APB-AP for debug access");
2234 return retval;
2235 }
2236
2237 retval = mem_ap_init(armv8->debug_ap);
2238 if (retval != ERROR_OK) {
2239 LOG_ERROR("Could not initialize the APB-AP");
2240 return retval;
2241 }
2242
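	/* insert extra TCK cycles after each MEM-AP memory access, giving the
	 * slower debug logic time to complete before the result is sampled */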
2243 armv8->debug_ap->memaccess_tck = 10;
2244
2245 if (!target->dbgbase_set) {
2246 uint32_t dbgbase;
2247 /* Get ROM Table base */
2248 uint32_t apid;
2249 int32_t coreidx = target->coreid;
2250 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2251 if (retval != ERROR_OK)
2252 return retval;
2253 /* Lookup 0x15 -- Processor DAP */
2254 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2255 &armv8->debug_base, &coreidx);
2256 if (retval != ERROR_OK)
2257 return retval;
2258 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
2259 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2260 } else
2261 armv8->debug_base = target->dbgbase;
2262
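	/* Clear the OS lock: while it is set, access to the core's debug
	 * registers is blocked */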
2263 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2264 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2265 if (retval != ERROR_OK) {
2266 LOG_DEBUG("Examine %s failed", "oslock");
2267 return retval;
2268 }
2269
2270 retval = mem_ap_read_u32(armv8->debug_ap,
2271 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2272 if (retval != ERROR_OK) {
2273 LOG_DEBUG("Examine %s failed", "CPUID");
2274 return retval;
2275 }
2276
2277 retval = mem_ap_read_u32(armv8->debug_ap,
2278 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2279 retval += mem_ap_read_u32(armv8->debug_ap,
2280 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2281 if (retval != ERROR_OK) {
2282 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2283 return retval;
2284 }
2285 retval = mem_ap_read_u32(armv8->debug_ap,
2286 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2287 retval += mem_ap_read_u32(armv8->debug_ap,
2288 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2289 if (retval != ERROR_OK) {
2290 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2291 return retval;
2292 }
2293
2294 retval = dap_run(armv8->debug_ap->dap);
2295 if (retval != ERROR_OK) {
		LOG_ERROR("%s: examination failed", target_name(target));
2297 return retval;
2298 }
2299
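	/* reassemble the 64-bit ID register values from their 32-bit halves */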
2300 ttypr |= tmp1;
2301 ttypr = (ttypr << 32) | tmp0;
2302 debug |= tmp3;
2303 debug = (debug << 32) | tmp2;
2304
2305 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2306 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2307 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2308
	if (target->private_config == NULL)
		return ERROR_FAIL;

	pc = (struct aarch64_private_config *)target->private_config;
	if (pc->cti == NULL) {
		LOG_ERROR("CTI not configured for %s, use the '-cti' target option", target_name(target));
		return ERROR_FAIL;
	}
2315
2316 armv8->cti = pc->cti;
2317
2318 retval = aarch64_dpm_setup(aarch64, debug);
2319 if (retval != ERROR_OK)
2320 return retval;
2321
2322 /* Setup Breakpoint Register Pairs */
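	/* ID_AA64DFR0_EL1: bits[15:12] hold the number of breakpoints minus one,
	 * bits[31:28] the number of context-matching breakpoints minus one */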
2323 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2324 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2325 aarch64->brp_num_available = aarch64->brp_num;
	aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
	if (aarch64->brp_list == NULL) {
		LOG_ERROR("Out of memory");
		return ERROR_FAIL;
	}
2327 for (i = 0; i < aarch64->brp_num; i++) {
2328 aarch64->brp_list[i].used = 0;
2329 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2330 aarch64->brp_list[i].type = BRP_NORMAL;
2331 else
2332 aarch64->brp_list[i].type = BRP_CONTEXT;
2333 aarch64->brp_list[i].value = 0;
2334 aarch64->brp_list[i].control = 0;
2335 aarch64->brp_list[i].BRPn = i;
2336 }
2337
2338 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2339
2340 target->state = TARGET_UNKNOWN;
2341 target->debug_reason = DBG_REASON_NOTHALTED;
2342 aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2343 target_set_examined(target);
2344 return ERROR_OK;
2345 }
2346
2347 static int aarch64_examine(struct target *target)
2348 {
2349 int retval = ERROR_OK;
2350
2351 /* don't re-probe hardware after each reset */
2352 if (!target_was_examined(target))
2353 retval = aarch64_examine_first(target);
2354
2355 /* Configure core debug access */
2356 if (retval == ERROR_OK)
2357 retval = aarch64_init_debug_access(target);
2358
2359 return retval;
2360 }
2361
2362 /*
 * aarch64 target creation and initialization
2364 */
2365
2366 static int aarch64_init_target(struct command_context *cmd_ctx,
2367 struct target *target)
2368 {
2369 /* examine_first() does a bunch of this */
2370 arm_semihosting_init(target);
2371 return ERROR_OK;
2372 }
2373
2374 static int aarch64_init_arch_info(struct target *target,
2375 struct aarch64_common *aarch64, struct adiv5_dap *dap)
2376 {
2377 struct armv8_common *armv8 = &aarch64->armv8_common;
2378
2379 /* Setup struct aarch64_common */
2380 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2381 armv8->arm.dap = dap;
2382
2383 /* register arch-specific functions */
2384 armv8->examine_debug_reason = NULL;
2385 armv8->post_debug_entry = aarch64_post_debug_entry;
2386 armv8->pre_restore_context = NULL;
2387 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2388
2389 armv8_init_arch_info(target, armv8);
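	/* poll for pending DCC debug messages every millisecond while running */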
2390 target_register_timer_callback(aarch64_handle_target_request, 1,
2391 TARGET_TIMER_TYPE_PERIODIC, target);
2392
2393 return ERROR_OK;
2394 }
2395
2396 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2397 {
2398 struct aarch64_private_config *pc = target->private_config;
2399 struct aarch64_common *aarch64;
2400
2401 if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2402 return ERROR_FAIL;
2403
2404 aarch64 = calloc(1, sizeof(struct aarch64_common));
2405 if (aarch64 == NULL) {
2406 LOG_ERROR("Out of memory");
2407 return ERROR_FAIL;
2408 }
2409
2410 return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2411 }
2412
2413 static void aarch64_deinit_target(struct target *target)
2414 {
2415 struct aarch64_common *aarch64 = target_to_aarch64(target);
2416 struct armv8_common *armv8 = &aarch64->armv8_common;
2417 struct arm_dpm *dpm = &armv8->dpm;
2418
2419 armv8_free_reg_cache(target);
2420 free(aarch64->brp_list);
2421 free(dpm->dbp);
2422 free(dpm->dwp);
2423 free(target->private_config);
2424 free(aarch64);
2425 }
2426
2427 static int aarch64_mmu(struct target *target, int *enabled)
2428 {
2429 if (target->state != TARGET_HALTED) {
2430 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2431 return ERROR_TARGET_INVALID;
2432 }
2433
2434 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2435 return ERROR_OK;
2436 }
2437
2438 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2439 target_addr_t *phys)
2440 {
2441 return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2442 }
2443
2444 /*
2445 * private target configuration items
2446 */
2447 enum aarch64_cfg_param {
2448 CFG_CTI,
2449 };
2450
2451 static const Jim_Nvp nvp_config_opts[] = {
2452 { .name = "-cti", .value = CFG_CTI },
2453 { .name = NULL, .value = -1 }
2454 };
2455
2456 static int aarch64_jim_configure(struct target *target, Jim_GetOptInfo *goi)
2457 {
2458 struct aarch64_private_config *pc;
2459 Jim_Nvp *n;
2460 int e;
2461
2462 pc = (struct aarch64_private_config *)target->private_config;
	if (pc == NULL) {
		pc = calloc(1, sizeof(struct aarch64_private_config));
		if (pc == NULL) {
			LOG_ERROR("Out of memory");
			return JIM_ERR;
		}
		target->private_config = pc;
	}
2467
2468 /*
2469 * Call adiv5_jim_configure() to parse the common DAP options
2470 * It will return JIM_CONTINUE if it didn't find any known
2471 * options, JIM_OK if it correctly parsed the topmost option
 * and JIM_ERR if an error occurred during parameter evaluation.
2473 * For JIM_CONTINUE, we check our own params.
2474 */
2475 e = adiv5_jim_configure(target, goi);
2476 if (e != JIM_CONTINUE)
2477 return e;
2478
2479 /* parse config or cget options ... */
2480 if (goi->argc > 0) {
2481 Jim_SetEmptyResult(goi->interp);
2482
2483 /* check first if topmost item is for us */
2484 e = Jim_Nvp_name2value_obj(goi->interp, nvp_config_opts,
2485 goi->argv[0], &n);
2486 if (e != JIM_OK)
2487 return JIM_CONTINUE;
2488
2489 e = Jim_GetOpt_Obj(goi, NULL);
2490 if (e != JIM_OK)
2491 return e;
2492
2493 switch (n->value) {
2494 case CFG_CTI: {
2495 if (goi->isconfigure) {
2496 Jim_Obj *o_cti;
2497 struct arm_cti *cti;
2498 e = Jim_GetOpt_Obj(goi, &o_cti);
2499 if (e != JIM_OK)
2500 return e;
2501 cti = cti_instance_by_jim_obj(goi->interp, o_cti);
2502 if (cti == NULL) {
2503 Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
2504 return JIM_ERR;
2505 }
2506 pc->cti = cti;
2507 } else {
2508 if (goi->argc != 0) {
2509 Jim_WrongNumArgs(goi->interp,
2510 goi->argc, goi->argv,
2511 "NO PARAMS");
2512 return JIM_ERR;
2513 }
2514
2515 if (pc == NULL || pc->cti == NULL) {
2516 Jim_SetResultString(goi->interp, "CTI not configured", -1);
2517 return JIM_ERR;
2518 }
2519 Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
2520 }
2521 break;
2522 }
2523
2524 default:
2525 return JIM_CONTINUE;
2526 }
2527 }
2528
2529 return JIM_OK;
2530 }
2531
2532 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2533 {
2534 struct target *target = get_current_target(CMD_CTX);
2535 struct armv8_common *armv8 = target_to_armv8(target);
2536
2537 return armv8_handle_cache_info_command(CMD,
2538 &armv8->armv8_mmu.armv8_cache);
2539 }
2540
2542 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2543 {
2544 struct target *target = get_current_target(CMD_CTX);
2545 if (!target_was_examined(target)) {
2546 LOG_ERROR("target not examined yet");
2547 return ERROR_FAIL;
2548 }
2549
2550 return aarch64_init_debug_access(target);
2551 }
2552
2553 COMMAND_HANDLER(aarch64_mask_interrupts_command)
2554 {
2555 struct target *target = get_current_target(CMD_CTX);
2556 struct aarch64_common *aarch64 = target_to_aarch64(target);
2557
2558 static const Jim_Nvp nvp_maskisr_modes[] = {
2559 { .name = "off", .value = AARCH64_ISRMASK_OFF },
2560 { .name = "on", .value = AARCH64_ISRMASK_ON },
2561 { .name = NULL, .value = -1 },
2562 };
2563 const Jim_Nvp *n;
2564
2565 if (CMD_ARGC > 0) {
2566 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2567 if (n->name == NULL) {
2568 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2569 return ERROR_COMMAND_SYNTAX_ERROR;
2570 }
2571
2572 aarch64->isrmasking_mode = n->value;
2573 }
2574
2575 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
2576 command_print(CMD, "aarch64 interrupt mask %s", n->name);
2577
2578 return ERROR_OK;
2579 }
2580
2581 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
2582 {
2583 struct command_context *context;
2584 struct target *target;
2585 struct arm *arm;
2586 int retval;
2587 bool is_mcr = false;
2588 int arg_cnt = 0;
2589
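	/* 'mcr' takes the command name plus 6 arguments (the value to write
	 * included); 'mrc' takes the name plus 5, the read result is returned
	 * through the Tcl result */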
2590 if (Jim_CompareStringImmediate(interp, argv[0], "mcr")) {
2591 is_mcr = true;
2592 arg_cnt = 7;
2593 } else {
2594 arg_cnt = 6;
2595 }
2596
2597 context = current_command_context(interp);
2598 assert(context != NULL);
2599
2600 target = get_current_target(context);
2601 if (target == NULL) {
2602 LOG_ERROR("%s: no current target", __func__);
2603 return JIM_ERR;
2604 }
2605 if (!target_was_examined(target)) {
2606 LOG_ERROR("%s: not yet examined", target_name(target));
2607 return JIM_ERR;
2608 }
2609
2610 arm = target_to_arm(target);
2611 if (!is_arm(arm)) {
2612 LOG_ERROR("%s: not an ARM", target_name(target));
2613 return JIM_ERR;
2614 }
2615
	if (target->state != TARGET_HALTED) {
		LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
		return JIM_ERR;
	}
2618
2619 if (arm->core_state == ARM_STATE_AARCH64) {
		LOG_ERROR("%s: core is in AArch64 state, mcr/mrc not available", target_name(target));
2621 return JIM_ERR;
2622 }
2623
2624 if (argc != arg_cnt) {
2625 LOG_ERROR("%s: wrong number of arguments", __func__);
2626 return JIM_ERR;
2627 }
2628
2629 int cpnum;
2630 uint32_t op1;
2631 uint32_t op2;
2632 uint32_t CRn;
2633 uint32_t CRm;
2634 uint32_t value;
2635 long l;
2636
2637 /* NOTE: parameter sequence matches ARM instruction set usage:
2638 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
2639 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
2640 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
2641 */
2642 retval = Jim_GetLong(interp, argv[1], &l);
2643 if (retval != JIM_OK)
2644 return retval;
2645 if (l & ~0xf) {
2646 LOG_ERROR("%s: %s %d out of range", __func__,
2647 "coprocessor", (int) l);
2648 return JIM_ERR;
2649 }
2650 cpnum = l;
2651
2652 retval = Jim_GetLong(interp, argv[2], &l);
2653 if (retval != JIM_OK)
2654 return retval;
2655 if (l & ~0x7) {
2656 LOG_ERROR("%s: %s %d out of range", __func__,
2657 "op1", (int) l);
2658 return JIM_ERR;
2659 }
2660 op1 = l;
2661
2662 retval = Jim_GetLong(interp, argv[3], &l);
2663 if (retval != JIM_OK)
2664 return retval;
2665 if (l & ~0xf) {
2666 LOG_ERROR("%s: %s %d out of range", __func__,
2667 "CRn", (int) l);
2668 return JIM_ERR;
2669 }
2670 CRn = l;
2671
2672 retval = Jim_GetLong(interp, argv[4], &l);
2673 if (retval != JIM_OK)
2674 return retval;
2675 if (l & ~0xf) {
2676 LOG_ERROR("%s: %s %d out of range", __func__,
2677 "CRm", (int) l);
2678 return JIM_ERR;
2679 }
2680 CRm = l;
2681
2682 retval = Jim_GetLong(interp, argv[5], &l);
2683 if (retval != JIM_OK)
2684 return retval;
2685 if (l & ~0x7) {
2686 LOG_ERROR("%s: %s %d out of range", __func__,
2687 "op2", (int) l);
2688 return JIM_ERR;
2689 }
2690 op2 = l;
2691
2692 value = 0;
2693
	if (is_mcr) {
2695 retval = Jim_GetLong(interp, argv[6], &l);
2696 if (retval != JIM_OK)
2697 return retval;
2698 value = l;
2699
2700 /* NOTE: parameters reordered! */
2701 /* ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2) */
2702 retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
2703 if (retval != ERROR_OK)
2704 return JIM_ERR;
2705 } else {
2706 /* NOTE: parameters reordered! */
2707 /* ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2) */
2708 retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
2709 if (retval != ERROR_OK)
2710 return JIM_ERR;
2711
2712 Jim_SetResult(interp, Jim_NewIntObj(interp, value));
2713 }
2714
2715 return JIM_OK;
2716 }
2717
2718 static const struct command_registration aarch64_exec_command_handlers[] = {
2719 {
2720 .name = "cache_info",
2721 .handler = aarch64_handle_cache_info_command,
2722 .mode = COMMAND_EXEC,
2723 .help = "display information about target caches",
2724 .usage = "",
2725 },
2726 {
2727 .name = "dbginit",
2728 .handler = aarch64_handle_dbginit_command,
2729 .mode = COMMAND_EXEC,
2730 .help = "Initialize core debug",
2731 .usage = "",
2732 },
2733 {
2734 .name = "maskisr",
2735 .handler = aarch64_mask_interrupts_command,
2736 .mode = COMMAND_ANY,
2737 .help = "mask aarch64 interrupts during single-step",
2738 .usage = "['on'|'off']",
2739 },
2740 {
2741 .name = "mcr",
2742 .mode = COMMAND_EXEC,
2743 .jim_handler = jim_mcrmrc,
2744 .help = "write coprocessor register",
2745 .usage = "cpnum op1 CRn CRm op2 value",
2746 },
2747 {
2748 .name = "mrc",
2749 .mode = COMMAND_EXEC,
2750 .jim_handler = jim_mcrmrc,
2751 .help = "read coprocessor register",
2752 .usage = "cpnum op1 CRn CRm op2",
2753 },
2754 {
2755 .chain = smp_command_handlers,
2756 },
2759 COMMAND_REGISTRATION_DONE
2760 };
2761
2762 static const struct command_registration aarch64_command_handlers[] = {
2763 {
2764 .chain = armv8_command_handlers,
2765 },
2766 {
2767 .name = "aarch64",
2768 .mode = COMMAND_ANY,
		.help = "AArch64 command group",
2770 .usage = "",
2771 .chain = aarch64_exec_command_handlers,
2772 },
2773 COMMAND_REGISTRATION_DONE
2774 };
2775
2776 struct target_type aarch64_target = {
2777 .name = "aarch64",
2778
2779 .poll = aarch64_poll,
2780 .arch_state = armv8_arch_state,
2781
2782 .halt = aarch64_halt,
2783 .resume = aarch64_resume,
2784 .step = aarch64_step,
2785
2786 .assert_reset = aarch64_assert_reset,
2787 .deassert_reset = aarch64_deassert_reset,
2788
2789 /* REVISIT allow exporting VFP3 registers ... */
2790 .get_gdb_arch = armv8_get_gdb_arch,
2791 .get_gdb_reg_list = armv8_get_gdb_reg_list,
2792
2793 .read_memory = aarch64_read_memory,
2794 .write_memory = aarch64_write_memory,
2795
2796 .add_breakpoint = aarch64_add_breakpoint,
2797 .add_context_breakpoint = aarch64_add_context_breakpoint,
2798 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2799 .remove_breakpoint = aarch64_remove_breakpoint,
2800 .add_watchpoint = NULL,
2801 .remove_watchpoint = NULL,
2802
2803 .commands = aarch64_command_handlers,
2804 .target_create = aarch64_target_create,
2805 .target_jim_configure = aarch64_jim_configure,
2806 .init_target = aarch64_init_target,
2807 .deinit_target = aarch64_deinit_target,
2808 .examine = aarch64_examine,
2809
2810 .read_phys_memory = aarch64_read_phys_memory,
2811 .write_phys_memory = aarch64_write_phys_memory,
2812 .mmu = aarch64_mmu,
2813 .virt2phys = aarch64_virt2phys,
2814 };