/***************************************************************************
 *   Copyright (C) 2015 by David Ung                                      *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify *
 *   it under the terms of the GNU General Public License as published by *
 *   the Free Software Foundation; either version 2 of the License, or    *
 *   (at your option) any later version.                                  *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,      *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of       *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        *
 *   GNU General Public License for more details.                         *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License    *
 *   along with this program; if not, write to the                        *
 *   Free Software Foundation, Inc.,                                      *
 *                                                                         *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "breakpoints.h"
#include "aarch64.h"
#include "register.h"
#include "target_request.h"
#include "target_type.h"
#include "armv8_opcodes.h"
#include "armv8_cache.h"
#include "arm_semihosting.h"
#include "smp.h"
#include <helper/time_support.h>

enum restart_mode {
	RESTART_LAZY,
	RESTART_SYNC,
};

enum halt_mode {
	HALT_LAZY,
	HALT_SYNC,
};

struct aarch64_private_config {
	struct adiv5_private_config adiv5_config;
	struct arm_cti *cti;
};

static int aarch64_poll(struct target *target);
static int aarch64_debug_entry(struct target *target);
static int aarch64_restore_context(struct target *target, bool bpwp);
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_mmu(struct target *target, int *enabled);
static int aarch64_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int aarch64_read_cpu_memory(struct target *target,
	uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);

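/*
 * Write the saved SCTLR value back to the PE. The MSR/MCR instruction is
 * chosen to match the exception level the core is currently in; R0/X0 is
 * clobbered via the DPM instruction interface and restored later.
 */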
static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
		case ARM_MODE_SYS:
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			LOG_INFO("cannot write system control register in this mode");
			return ERROR_FAIL;
		}

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}

/* Modify system_control_reg in order to enable or disable the MMU for:
 * - virt2phys address conversion
 * - reading or writing memory at a physical or virtual address */
static int aarch64_mmu_modify(struct target *target, int enable)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval = ERROR_OK;
	uint32_t instr = 0;

	if (enable) {
		/* The MMU can only be re-enabled if it was enabled when the
		 * target stopped */
		if (!(aarch64->system_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable the MMU on a target that stopped with the MMU disabled");
			return ERROR_FAIL;
		}
		if (!(aarch64->system_control_reg_curr & 0x1U))
			aarch64->system_control_reg_curr |= 0x1U;
	} else {
		if (aarch64->system_control_reg_curr & 0x4U) {
			/* data cache is active, flush it before disabling */
			aarch64->system_control_reg_curr &= ~0x4U;
			if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
				armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
		}
		if (aarch64->system_control_reg_curr & 0x1U)
			aarch64->system_control_reg_curr &= ~0x1U;
	}

	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_SYS:
		instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_DEBUG("unknown cpu state 0x%" PRIx32, armv8->arm.core_mode);
		break;
	}

	retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
			aarch64->system_control_reg_curr);
	return retval;
}

/*
 * Basic debug access; very low level, assumes state is saved
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG("%s", target_name(target));

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = arm_cti_enable(armv8->cti, true);
	/* By default, gate all channel events to and from the CTM */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* output halt requests to PE on channel 0 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	return ERROR_OK;
}

/* Write to memory mapped registers directly with no cache or mmu handling */
static int aarch64_dap_write_memap_register_u32(struct target *target,
	uint32_t address,
	uint32_t value)
{
	int retval;
	struct armv8_common *armv8 = target_to_armv8(target);

	retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);

	return retval;
}

static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
{
	struct arm_dpm *dpm = &a8->armv8_common.dpm;
	int retval;

	dpm->arm = &a8->armv8_common.arm;
	dpm->didr = debug;

	retval = armv8_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = armv8_dpm_initialize(dpm);

	return retval;
}

static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
}

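/*
 * Sample PRSR once and report whether (PRSR & mask) == (val & mask).
 * Optionally returns the raw PRSR value so callers can inspect other
 * status bits from the same read.
 */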
static int aarch64_check_state_one(struct target *target,
	uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	uint32_t prsr;
	int retval;

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
	if (retval != ERROR_OK)
		return retval;

	if (p_prsr)
		*p_prsr = prsr;

	if (p_result)
		*p_result = (prsr & mask) == (val & mask);

	return ERROR_OK;
}

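/* Poll PRSR until the PE reports the halted state, or give up after one second. */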
static int aarch64_wait_halt_one(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t prsr;

	int64_t then = timeval_ms();
	for (;;) {
		int halted;

		retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
		if (retval != ERROR_OK || halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			LOG_DEBUG("target %s timeout, prsr=0x%08" PRIx32, target_name(target), prsr);
			break;
		}
	}
	return retval;
}

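/*
 * Prepare all running PEs of an SMP group for halting: ungate CTI channel 0
 * on each so a halt event on any one of them propagates through the CTM, and
 * enable halting debug mode on each. *p_first receives the first prepared
 * target, or the calling target itself (see below).
 */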
static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head = target->head;
	struct target *first = NULL;

	LOG_DEBUG("target %s exc %i", target_name(target), exc_target);

	while (head != NULL) {
		struct target *curr = head->target;
		struct armv8_common *armv8 = target_to_armv8(curr);
		head = head->next;

		if (exc_target && curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_RUNNING)
			continue;

		/* HACK: mark this target as prepared for halting */
		curr->debug_reason = DBG_REASON_DBGRQ;

		/* open the gate for channel 0 to let HALT requests pass to the CTM */
		retval = arm_cti_ungate_channel(armv8->cti, 0);
		if (retval == ERROR_OK)
			retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
		if (retval != ERROR_OK)
			break;

		LOG_DEBUG("target %s prepared", target_name(curr));

		if (first == NULL)
			first = curr;
	}

	if (p_first) {
		if (exc_target && first)
			*p_first = first;
		else
			*p_first = target;
	}

	return retval;
}

static int aarch64_halt_one(struct target *target, enum halt_mode mode)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG("%s", target_name(target));

	/* allow Halting Debug Mode */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = arm_cti_pulse_channel(armv8->cti, 0);
	if (retval != ERROR_OK)
		return retval;

	if (mode == HALT_SYNC) {
		retval = aarch64_wait_halt_one(target);
		if (retval != ERROR_OK) {
			if (retval == ERROR_TARGET_TIMEOUT)
				LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
			return retval;
		}
	}

	return ERROR_OK;
}

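/*
 * Halt all PEs of an SMP group: prepare every running PE, pulse a single
 * halt request through the CTM and then wait until every examined PE
 * reports the halted state in PRSR.
 */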
static int aarch64_halt_smp(struct target *target, bool exc_target)
{
	struct target *next = target;
	int retval;

	/* prepare halt on all PEs of the group */
	retval = aarch64_prepare_halt_smp(target, exc_target, &next);

	if (exc_target && next == target)
		return retval;

	/* halt the target PE */
	if (retval == ERROR_OK)
		retval = aarch64_halt_one(next, HALT_LAZY);

	if (retval != ERROR_OK)
		return retval;

	/* wait for all PEs to halt */
	int64_t then = timeval_ms();
	for (;;) {
		bool all_halted = true;
		struct target_list *head;
		struct target *curr;

		foreach_smp_target(head, target->head) {
			int halted;

			curr = head->target;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
			if (retval != ERROR_OK || !halted) {
				all_halted = false;
				break;
			}
		}

		if (all_halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}

		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTIs are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit halt for the second cluster.
		 */
		retval = aarch64_halt_one(curr, HALT_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}

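/*
 * After one PE of an SMP group halted, bring the view of the whole group up
 * to date: halt any stragglers, poll every other PE so its debug state gets
 * read out, and poll the target serving GDB last.
 */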
static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;

	if (debug_reason == DBG_REASON_NOTHALTED) {
		LOG_DEBUG("Halting remaining targets in SMP group");
		aarch64_halt_smp(target, true);
	}

	/* poll all targets in the group, but skip the target that serves GDB */
	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* remember the gdb_service->target */
		if (curr->gdb_service != NULL)
			gdb_target = curr->gdb_service->target;
		/* skip it */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in aarch64_poll() */
		curr->smp = 0;
		aarch64_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target != NULL && gdb_target != target)
		aarch64_poll(gdb_target);

	return ERROR_OK;
}

/*
 * AArch64 run control
 */

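/*
 * Check PRSR for a halting debug event. On a running-to-halted transition,
 * enter debug state, update the SMP group members and fire the matching
 * TARGET_EVENT_*HALTED callbacks.
 */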
static int aarch64_poll(struct target *target)
{
	enum target_state prev_target_state;
	int retval = ERROR_OK;
	int halted;

	retval = aarch64_check_state_one(target,
				PRSR_HALT, PRSR_HALT, &halted, NULL);
	if (retval != ERROR_OK)
		return retval;

	if (halted) {
		prev_target_state = target->state;
		if (prev_target_state != TARGET_HALTED) {
			enum target_debug_reason debug_reason = target->debug_reason;

			/* We have a halting debug event */
			target->state = TARGET_HALTED;
			LOG_DEBUG("Target %s halted", target_name(target));
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp)
				update_halt_gdb(target, debug_reason);

			if (arm_semihosting(target, &retval) != 0)
				return retval;

			switch (prev_target_state) {
			case TARGET_RUNNING:
			case TARGET_UNKNOWN:
			case TARGET_RESET:
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
				break;
			case TARGET_DEBUG_RUNNING:
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
				break;
			default:
				break;
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}

static int aarch64_halt(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	armv8->last_run_control_op = ARMV8_RUNCONTROL_HALT;

	if (target->smp)
		return aarch64_halt_smp(target, false);

	return aarch64_halt_one(target, HALT_SYNC);
}

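/*
 * Restore one target's register context for resuming: fix up the PC for the
 * core state, restore SCTLR and write back all dirty registers. With
 * current == 0 execution continues at *address, otherwise *address is set
 * to the current PC.
 */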
static int aarch64_restore_one(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	LOG_DEBUG("%s", target_name(target));

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups do not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = true;
	arm->pc->valid = true;

	/* call this before restoring the context because it uses cpu
	 * register r0 for restoring the system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}

/**
 * Prepare a single target for restart: sanity-check DSCR, acknowledge any
 * pending CTI halt event and set up the CTI gates so that only restart
 * events reach this PE.
 */
static int aarch64_prepare_restart_one(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dscr;
	uint32_t tmp;

	LOG_DEBUG("%s", target_name(target));

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* acknowledge a pending CTI halt event */
	retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs. Also close gate for channel 0
	 * to isolate the PE from halt events.
	 */
	if (retval == ERROR_OK)
		retval = arm_cti_ungate_channel(armv8->cti, 1);
	if (retval == ERROR_OK)
		retval = arm_cti_gate_channel(armv8->cti, 0);

	/* make sure that DSCR.HDE is set */
	if (retval == ERROR_OK) {
		dscr |= DSCR_HDE;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval == ERROR_OK) {
		/* clear sticky bits in PRSR, SDR is now 0 */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
	}

	return retval;
}

static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;

	LOG_DEBUG("%s", target_name(target));

	/* trigger an event on channel 1, generates a restart request to the PE */
	retval = arm_cti_pulse_channel(armv8->cti, 1);
	if (retval != ERROR_OK)
		return retval;

	if (mode == RESTART_SYNC) {
		int64_t then = timeval_ms();
		for (;;) {
			int resumed;
			/*
			 * if PRSR.SDR is set now, the target did restart, even
			 * if it's now already halted again (e.g. due to breakpoint)
			 */
			retval = aarch64_check_state_one(target,
						PRSR_SDR, PRSR_SDR, &resumed, NULL);
			if (retval != ERROR_OK || resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: Timeout waiting for resume", target_name(target));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	return ERROR_OK;
}

static int aarch64_restart_one(struct target *target, enum restart_mode mode)
{
	int retval;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prepare_restart_one(target);
	if (retval == ERROR_OK)
		retval = aarch64_do_restart_one(target, mode);

	return retval;
}

/*
 * prepare all but the current target for restart
 */
static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;
	uint64_t address;

	foreach_smp_target(head, target->head) {
		struct target *curr = head->target;

		/* skip calling target */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_HALTED)
			continue;

		/* resume at current address, not in step mode */
		retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
		if (retval == ERROR_OK)
			retval = aarch64_prepare_restart_one(curr);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to restore target %s", target_name(curr));
			break;
		}
		/* remember the first valid target in the group */
		if (first == NULL)
			first = curr;
	}

	if (p_first)
		*p_first = first;

	return retval;
}

static int aarch64_step_restart_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prep_restart_smp(target, 0, &first);
	if (retval != ERROR_OK)
		return retval;

	if (first != NULL)
		retval = aarch64_do_restart_one(first, RESTART_LAZY);
	if (retval != ERROR_OK) {
		LOG_DEBUG("error restarting target %s", target_name(first));
		return retval;
	}

	int64_t then = timeval_ms();
	for (;;) {
		struct target *curr = target;
		bool all_resumed = true;

		foreach_smp_target(head, target->head) {
			uint32_t prsr;
			int resumed;

			curr = head->target;

			if (curr == target)
				continue;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr,
						PRSR_SDR, PRSR_SDR, &resumed, &prsr);
			if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
				all_resumed = false;
				break;
			}

			if (curr->state != TARGET_RUNNING) {
				curr->state = TARGET_RUNNING;
				curr->debug_reason = DBG_REASON_NOTHALTED;
				target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
			}
		}

		if (all_resumed)
			break;

		if (timeval_ms() > then + 1000) {
			LOG_ERROR("%s: timeout waiting for target resume", __func__);
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTIs are not connected by a common
		 * trigger matrix. It seems that we need to restart one core in
		 * each cluster explicitly. So if we find that a core has not
		 * resumed yet, we trigger an explicit resume for the second
		 * cluster.
		 */
		retval = aarch64_do_restart_one(curr, RESTART_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}

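/*
 * Resume execution. In an SMP group all other halted PEs are prepared and
 * restarted first via the CTI channel 1 event, then the calling target is
 * restored and restarted synchronously.
 */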
static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	struct armv8_common *armv8 = target_to_armv8(target);
	armv8->last_run_control_op = ARMV8_RUNCONTROL_RESUME;

	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	/*
	 * If this target is part of an SMP group, prepare the other
	 * targets for resuming. This involves restoring the complete
	 * target register context and setting up CTI gates to accept
	 * resume events from the trigger matrix.
	 */
	if (target->smp) {
		retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
		if (retval != ERROR_OK)
			return retval;
	}

	/* all targets prepared, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
			debug_execution);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_SYNC);
	if (retval != ERROR_OK)
		return retval;

	if (target->smp) {
		int64_t then = timeval_ms();
		for (;;) {
			struct target *curr = target;
			struct target_list *head;
			bool all_resumed = true;

			foreach_smp_target(head, target->head) {
				uint32_t prsr;
				int resumed;

				curr = head->target;
				if (curr == target)
					continue;
				if (!target_was_examined(curr))
					continue;

				retval = aarch64_check_state_one(curr,
							PRSR_SDR, PRSR_SDR, &resumed, &prsr);
				if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
					all_resumed = false;
					break;
				}

				if (curr->state != TARGET_RUNNING) {
					curr->state = TARGET_RUNNING;
					curr->debug_reason = DBG_REASON_NOTHALTED;
					target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
				}
			}

			if (all_resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}

			/*
			 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
			 * and it looks like the CTIs are not connected by a common
			 * trigger matrix. It seems that we need to restart one core
			 * in each cluster explicitly. So if we find that a core has
			 * not resumed yet, we trigger an explicit resume for the
			 * second cluster.
			 */
			retval = aarch64_do_restart_one(curr, RESTART_LAZY);
			if (retval != ERROR_OK)
				break;
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}

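/*
 * Enter debug state after a halt: clear sticky errors, latch DSCR, select
 * the opcode and register access scheme for the core state, and read back
 * the current register set. Reads WFAR if we halted on a watchpoint.
 */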
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	uint32_t dscr;

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval == ERROR_OK)
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);

	dpm->dscr = dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* close the CTI gate for all events */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR1,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}

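/*
 * Read SCTLR for the current exception level so that the cached MMU and
 * cache enable bits match the hardware state, and identify the cache layout
 * on first entry.
 */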
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_SYS:
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_INFO("cannot read system control register in this mode");
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
			(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
			(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	return ERROR_OK;
}

/*
 * single-step a target
 */
static int aarch64_step(struct target *target, int current, target_addr_t address,
	int handle_breakpoints)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	int saved_retval = ERROR_OK;
	int retval;
	uint32_t edecr;

	armv8->last_run_control_op = ARMV8_RUNCONTROL_STEP;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
	/* make sure EDECR.SS is not set when restoring the register */

	if (retval == ERROR_OK) {
		edecr &= ~0x4;
		/* set EDECR.SS to enter hardware step mode */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
	}
	/* disable interrupts while stepping */
	if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
		retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
	/* bail out if stepping setup has failed */
	if (retval != ERROR_OK)
		return retval;

	if (target->smp && (current == 1)) {
		/*
		 * isolate current target so that it doesn't get resumed
		 * together with the others
		 */
		retval = arm_cti_gate_channel(armv8->cti, 1);
		/* resume all other targets in the group */
		if (retval == ERROR_OK)
			retval = aarch64_step_restart_smp(target);
		if (retval != ERROR_OK) {
			LOG_ERROR("Failed to restart non-stepping targets in SMP group");
			return retval;
		}
		LOG_DEBUG("Restarted all non-stepping targets in SMP group");
	}

	/* all other targets running, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &address, 0, 0);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_LAZY);

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
	if (!handle_breakpoints)
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	int64_t then = timeval_ms();
	for (;;) {
		int stepped;
		uint32_t prsr;

		retval = aarch64_check_state_one(target,
					PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
		if (retval != ERROR_OK || stepped)
			break;

		if (timeval_ms() > then + 100) {
			LOG_ERROR("timeout waiting for target %s halt after step",
					target_name(target));
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
	}

	/*
	 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
	 * causes a timeout. The core takes the step but doesn't complete it and so
	 * debug state is never entered. However, you can manually halt the core
	 * as an external debug event is also a WFI wakeup event.
	 */
	if (retval == ERROR_TARGET_TIMEOUT)
		saved_retval = aarch64_halt_one(target, HALT_SYNC);

	/* restore EDECR */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, edecr);
	if (retval != ERROR_OK)
		return retval;

	/* restore interrupts */
	if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
		retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
		if (retval != ERROR_OK)
			return retval;
	}

	if (saved_retval != ERROR_OK)
		return saved_retval;

	return aarch64_poll(target);
}

static int aarch64_restore_context(struct target *target, bool bpwp)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;

	int retval;

	LOG_DEBUG("%s", target_name(target));

	if (armv8->pre_restore_context)
		armv8->pre_restore_context(target);

	retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
	if (retval == ERROR_OK) {
		/* registers are now invalid */
		register_cache_invalidate(arm->core_cache);
		register_cache_invalidate(arm->core_cache->next);
	}

	return retval;
}

/*
 * AArch64 breakpoint and watchpoint functions
 */

/* Setup hardware Breakpoint Register Pair */
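/*
 * The control word written to DBGBCRn_EL1 below is assembled from (see the
 * Arm ARM, DDI 0487, description of DBGBCR<n>_EL1):
 *   bits [23:20]  BT   - breakpoint type (matchmode)
 *   bit  [13]     HMC  - match in all exception levels
 *   bits [12:5]   BAS  - byte address select
 *   bits [2:1]    PMC  - EL0/EL1 privilege match
 *   bit  [0]      E    - breakpoint enable
 */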
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		int64_t bpt_value;
		while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
			brp_i++;
		if (brp_i >= aarch64->brp_num) {
			LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
		breakpoint->set = brp_i + 1;
		if (breakpoint->length == 2)
			byte_addr_select = (3 << (breakpoint->address & 0x02));
		control = ((matchmode & 0x7) << 20)
			| (1 << 13)
			| (byte_addr_select << 5)
			| (3 << 1) | 1;
		brp_list[brp_i].used = 1;
		brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
		brp_list[brp_i].control = control;
		bpt_value = brp_list[brp_i].value;

		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
				(uint32_t)(bpt_value & 0xFFFFFFFF));
		if (retval != ERROR_OK)
			return retval;
		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
				(uint32_t)(bpt_value >> 32));
		if (retval != ERROR_OK)
			return retval;

		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
				brp_list[brp_i].control);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control,
				brp_list[brp_i].value);

	} else if (breakpoint->type == BKPT_SOFT) {
		uint8_t code[4];

		buf_set_u32(code, 0, 32, armv8_opcode(armv8, ARMV8_OPC_HLT));
		retval = target_read_memory(target,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length, 1,
				breakpoint->orig_instr);
		if (retval != ERROR_OK)
			return retval;

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		retval = target_write_memory(target,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length, 1, code);
		if (retval != ERROR_OK)
			return retval;

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		breakpoint->set = 0x11;	/* Any nice value but 0 */
	}

	/* Ensure that halting debug mode is enabled */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Failed to set DSCR.HDE");
		return retval;
	}

	return ERROR_OK;
}

static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval = ERROR_FAIL;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs */
	while ((brp_i < aarch64->brp_num) &&
		(brp_list[brp_i].used || (brp_list[brp_i].type != BRP_CONTEXT)))
		brp_i++;

	if (brp_i >= aarch64->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint->set = brp_i + 1;
	control = ((matchmode & 0x7) << 20)
		| (1 << 13)
		| (byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_i].used = 1;
	brp_list[brp_i].value = (breakpoint->asid);
	brp_list[brp_i].control = control;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
			brp_list[brp_i].value);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
			brp_list[brp_i].control);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
			brp_list[brp_i].control,
			brp_list[brp_i].value);
	return ERROR_OK;
}

static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval = ERROR_FAIL;
	int brp_1 = 0;	/* holds the contextID pair */
	int brp_2 = 0;	/* holds the IVA pair */
	uint32_t control_CTX, control_IVA;
	uint8_t CTX_byte_addr_select = 0x0F;
	uint8_t IVA_byte_addr_select = 0x0F;
	uint8_t CTX_machmode = 0x03;
	uint8_t IVA_machmode = 0x01;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs */
	while ((brp_1 < aarch64->brp_num) &&
		(brp_list[brp_1].used || (brp_list[brp_1].type != BRP_CONTEXT)))
		brp_1++;

	LOG_DEBUG("brp(CTX) found num: %d", brp_1);
	if (brp_1 >= aarch64->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	while ((brp_2 < aarch64->brp_num) &&
		(brp_list[brp_2].used || (brp_list[brp_2].type != BRP_NORMAL)))
		brp_2++;

	LOG_DEBUG("brp(IVA) found num: %d", brp_2);
	if (brp_2 >= aarch64->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint->set = brp_1 + 1;
	breakpoint->linked_BRP = brp_2;
	control_CTX = ((CTX_machmode & 0x7) << 20)
		| (brp_2 << 16)
		| (0 << 14)
		| (CTX_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_1].used = 1;
	brp_list[brp_1].value = (breakpoint->asid);
	brp_list[brp_1].control = control_CTX;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
			brp_list[brp_1].value);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
			brp_list[brp_1].control);
	if (retval != ERROR_OK)
		return retval;

	control_IVA = ((IVA_machmode & 0x7) << 20)
		| (brp_1 << 16)
		| (1 << 13)
		| (IVA_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_2].used = 1;
	brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
	brp_list[brp_2].control = control_IVA;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].value & 0xFFFFFFFF);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].value >> 32);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].control);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}

static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
					brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
					brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
					brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}

static int aarch64_add_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_breakpoint(target, breakpoint, 0x00);	/* Exact match */
}

static int aarch64_add_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_context_breakpoint(target, breakpoint, 0x02);	/* asid match */
}

static int aarch64_add_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_hybrid_breakpoint(target, breakpoint);	/* ??? */
}

static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

#if 0
/* It is perfectly possible to remove breakpoints while the target is running */
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}
#endif

	if (breakpoint->set) {
		aarch64_unset_breakpoint(target, breakpoint);
		if (breakpoint->type == BKPT_HARD)
			aarch64->brp_num_available++;
	}

	return ERROR_OK;
}

/*
 * AArch64 reset functions
 */

static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		jtag_add_reset(0, 1);
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target)) {
		register_cache_invalidate(armv8->arm.core_cache);
		register_cache_invalidate(armv8->arm.core_cache->next);
	}

	target->state = TARGET_RESET;

	return ERROR_OK;
}

static int aarch64_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	jtag_add_reset(0, 0);

	if (!target_was_examined(target))
		return ERROR_OK;

	retval = aarch64_poll(target);
	if (retval != ERROR_OK)
		return retval;

	retval = aarch64_init_debug_access(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
				target_name(target));
			retval = target_halt(target);
		}
	}

	return retval;
}

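/*
 * Slow path for CPU memory writes: for each unit, place the data in DTRRX,
 * move it from the DCC into R1/W1 with an MRS/MRC, and store it through the
 * address held in R0/X0 using a post-indexed STRB/STRH/STRW.
 */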
static int aarch64_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode if necessary */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t data, opcode;

		/* write the data to store into DTRRX */
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		if (arm->core_state == ARM_STATE_AARCH64)
			retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}

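/*
 * Fast path for word-sized, word-aligned writes: with DSCR.MA (memory access
 * mode) set, each write to DTRRX makes the PE store the word to the address
 * in X0 with post-increment, so the whole buffer can be streamed with a
 * single non-incrementing AP transfer.
 */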
1772 static int aarch64_write_cpu_memory_fast(struct target *target,
1773 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1774 {
1775 struct armv8_common *armv8 = target_to_armv8(target);
1776 struct arm *arm = &armv8->arm;
1777 int retval;
1778
1779 armv8_reg_current(arm, 1)->dirty = true;
1780
1781 /* Step 1.d - Change DCC to memory mode */
1782 *dscr |= DSCR_MA;
1783 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1784 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1785 if (retval != ERROR_OK)
1786 return retval;
1787
1788
1789 /* Step 2.a - Do the write */
1790 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1791 buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
1792 if (retval != ERROR_OK)
1793 return retval;
1794
1795 /* Step 3.a - Switch DTR mode back to Normal mode */
1796 *dscr &= ~DSCR_MA;
1797 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1798 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1799 if (retval != ERROR_OK)
1800 return retval;
1801
1802 return ERROR_OK;
1803 }
1804
1805 static int aarch64_write_cpu_memory(struct target *target,
1806 uint64_t address, uint32_t size,
1807 uint32_t count, const uint8_t *buffer)
1808 {
1809 /* write memory through APB-AP */
1810 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1811 struct armv8_common *armv8 = target_to_armv8(target);
1812 struct arm_dpm *dpm = &armv8->dpm;
1813 struct arm *arm = &armv8->arm;
1814 uint32_t dscr;
1815
1816 if (target->state != TARGET_HALTED) {
1817 LOG_WARNING("target not halted");
1818 return ERROR_TARGET_NOT_HALTED;
1819 }
1820
1821 /* Mark register X0 as dirty, as it will be used
1822 * for transferring the data.
1823 * It will be restored automatically when exiting
1824 * debug mode
1825 */
1826 armv8_reg_current(arm, 0)->dirty = true;
1827
1828 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1829
1830 /* Read DSCR */
1831 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1832 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1833 if (retval != ERROR_OK)
1834 return retval;
1835
1836 /* Set Normal access mode */
1837 dscr = (dscr & ~DSCR_MA);
1838 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1839 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1840 if (retval != ERROR_OK)
1841 return retval;
1842
1843 if (arm->core_state == ARM_STATE_AARCH64) {
1844 /* Write X0 with value 'address' using write procedure */
1845 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1846 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1847 retval = dpm->instr_write_data_dcc_64(dpm,
1848 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
1849 } else {
1850 /* Write R0 with value 'address' using write procedure */
1851 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1852 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1853 retval = dpm->instr_write_data_dcc(dpm,
1854 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
1855 }
1856
1857 if (retval != ERROR_OK)
1858 return retval;
1859
1860 if (size == 4 && (address % 4) == 0)
1861 retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
1862 else
1863 retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);
1864
1865 if (retval != ERROR_OK) {
1866 /* Unset DTR mode */
1867 mem_ap_read_atomic_u32(armv8->debug_ap,
1868 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1869 dscr &= ~DSCR_MA;
1870 mem_ap_write_atomic_u32(armv8->debug_ap,
1871 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1872 }
1873
1874 /* Check for sticky abort flags in the DSCR */
1875 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1876 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1877 if (retval != ERROR_OK)
1878 return retval;
1879
1880 dpm->dscr = dscr;
1881 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1882 /* Abort occurred - clear it and exit */
1883 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1884 armv8_dpm_handle_exception(dpm, true);
1885 return ERROR_FAIL;
1886 }
1887
1888 /* Done */
1889 return ERROR_OK;
1890 }
1891
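/*
 * Slow path for sub-word or unaligned reads: each item is loaded with a
 * post-indexed LDRB/LDRH/LDR via X0/X1, moved to the DTR by the executed
 * MSR (or MCR in AArch32 state), and collected from DBGDTRTX over the AP.
 */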
1892 static int aarch64_read_cpu_memory_slow(struct target *target,
1893 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
1894 {
1895 struct armv8_common *armv8 = target_to_armv8(target);
1896 struct arm_dpm *dpm = &armv8->dpm;
1897 struct arm *arm = &armv8->arm;
1898 int retval;
1899
1900 armv8_reg_current(arm, 1)->dirty = true;
1901
1902 /* change DCC to normal mode (if necessary) */
1903 if (*dscr & DSCR_MA) {
1904 		*dscr &= ~DSCR_MA;
1905 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1906 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1907 if (retval != ERROR_OK)
1908 return retval;
1909 }
1910
1911 while (count) {
1912 uint32_t opcode, data;
1913
1914 if (size == 1)
1915 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
1916 else if (size == 2)
1917 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
1918 else
1919 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
1920 retval = dpm->instr_execute(dpm, opcode);
1921 if (retval != ERROR_OK)
1922 return retval;
1923
1924 if (arm->core_state == ARM_STATE_AARCH64)
1925 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
1926 else
1927 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1928 if (retval != ERROR_OK)
1929 return retval;
1930
1931 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1932 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
1933 if (retval != ERROR_OK)
1934 return retval;
1935
1936 if (size == 1)
1937 *buffer = (uint8_t)data;
1938 else if (size == 2)
1939 target_buffer_set_u16(target, buffer, (uint16_t)data);
1940 else
1941 target_buffer_set_u32(target, buffer, data);
1942
1943 /* Advance */
1944 buffer += size;
1945 --count;
1946 }
1947
1948 return ERROR_OK;
1949 }
1950
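/*
 * Fast path for word-sized, word-aligned reads. Per the memory access
 * mode in DDI0487A.g chapter J9.1, with EDSCR.MA set each read of
 * DBGDTRTX makes the PE execute "LDR W1, [X0], #4" and returns the
 * previously loaded word; hence the first read is discarded and the
 * final word is collected only after leaving memory access mode.
 */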
1951 static int aarch64_read_cpu_memory_fast(struct target *target,
1952 uint32_t count, uint8_t *buffer, uint32_t *dscr)
1953 {
1954 struct armv8_common *armv8 = target_to_armv8(target);
1955 struct arm_dpm *dpm = &armv8->dpm;
1956 struct arm *arm = &armv8->arm;
1957 int retval;
1958 uint32_t value;
1959
1960 /* Mark X1 as dirty */
1961 armv8_reg_current(arm, 1)->dirty = true;
1962
1963 if (arm->core_state == ARM_STATE_AARCH64) {
1964 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1965 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
1966 } else {
1967 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1968 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1969 }
1970
1971 if (retval != ERROR_OK)
1972 return retval;
1973
1974 /* Step 1.e - Change DCC to memory mode */
1975 *dscr |= DSCR_MA;
1976 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1977 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1978 if (retval != ERROR_OK)
1979 return retval;
1980
1981 /* Step 1.f - read DBGDTRTX and discard the value */
1982 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1983 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1984 if (retval != ERROR_OK)
1985 return retval;
1986
1987 count--;
1988 	/* Read the data - each read of the DTRTX register causes the instruction to be reissued.
1989 	 * Abort flags are sticky, so they can be checked at the end of the transaction.
1990 	 *
1991 	 * The data is read in 32-bit aligned chunks.
1992 	 */
1993
1994 if (count) {
1995 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1996 * increments X0 by 4. */
1997 retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
1998 armv8->debug_base + CPUV8_DBG_DTRTX);
1999 if (retval != ERROR_OK)
2000 return retval;
2001 }
2002
2003 /* Step 3.a - set DTR access mode back to Normal mode */
2004 *dscr &= ~DSCR_MA;
2005 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2006 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2007 if (retval != ERROR_OK)
2008 return retval;
2009
2010 /* Step 3.b - read DBGDTRTX for the final value */
2011 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2012 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2013 if (retval != ERROR_OK)
2014 return retval;
2015
2016 target_buffer_set_u32(target, buffer + count * 4, value);
2017 return retval;
2018 }
2019
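/*
 * Read 'count' items of 'size' bytes from target memory at 'address'
 * through the APB-AP; the structure mirrors aarch64_write_cpu_memory.
 */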
2020 static int aarch64_read_cpu_memory(struct target *target,
2021 target_addr_t address, uint32_t size,
2022 uint32_t count, uint8_t *buffer)
2023 {
2024 /* read memory through APB-AP */
2025 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2026 struct armv8_common *armv8 = target_to_armv8(target);
2027 struct arm_dpm *dpm = &armv8->dpm;
2028 struct arm *arm = &armv8->arm;
2029 uint32_t dscr;
2030
2031 LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
2032 address, size, count);
2033
2034 if (target->state != TARGET_HALTED) {
2035 LOG_WARNING("target not halted");
2036 return ERROR_TARGET_NOT_HALTED;
2037 }
2038
2039 /* Mark register X0 as dirty, as it will be used
2040 * for transferring the data.
2041 * It will be restored automatically when exiting
2042 * debug mode
2043 */
2044 armv8_reg_current(arm, 0)->dirty = true;
2045
2046 /* Read DSCR */
2047 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2048 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2049 if (retval != ERROR_OK)
2050 return retval;
2051
2052 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2053
2054 /* Set Normal access mode */
2055 dscr &= ~DSCR_MA;
2056 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2057 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2058 if (retval != ERROR_OK)
2059 return retval;
2060
2061 if (arm->core_state == ARM_STATE_AARCH64) {
2062 /* Write X0 with value 'address' using write procedure */
2063 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2064 		/* Step 1.c - Copy value from DTR to X0 using instruction mrs x0, DBGDTR_EL0 */
2065 retval = dpm->instr_write_data_dcc_64(dpm,
2066 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
2067 } else {
2068 /* Write R0 with value 'address' using write procedure */
2069 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2070 		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRRXint, r0 */
2071 retval = dpm->instr_write_data_dcc(dpm,
2072 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
2073 }
2074
2075 if (retval != ERROR_OK)
2076 return retval;
2077
2078 if (size == 4 && (address % 4) == 0)
2079 retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
2080 else
2081 retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2082
2083 if (dscr & DSCR_MA) {
2084 dscr &= ~DSCR_MA;
2085 mem_ap_write_atomic_u32(armv8->debug_ap,
2086 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2087 }
2088
2089 if (retval != ERROR_OK)
2090 return retval;
2091
2092 /* Check for sticky abort flags in the DSCR */
2093 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2094 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2095 if (retval != ERROR_OK)
2096 return retval;
2097
2098 dpm->dscr = dscr;
2099
2100 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2101 /* Abort occurred - clear it and exit */
2102 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2103 armv8_dpm_handle_exception(dpm, true);
2104 return ERROR_FAIL;
2105 }
2106
2107 /* Done */
2108 return ERROR_OK;
2109 }
2110
2111 static int aarch64_read_phys_memory(struct target *target,
2112 target_addr_t address, uint32_t size,
2113 uint32_t count, uint8_t *buffer)
2114 {
2115 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2116
2117 if (count && buffer) {
2118 /* read memory through APB-AP */
2119 retval = aarch64_mmu_modify(target, 0);
2120 if (retval != ERROR_OK)
2121 return retval;
2122 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2123 }
2124 return retval;
2125 }
2126
2127 static int aarch64_read_memory(struct target *target, target_addr_t address,
2128 uint32_t size, uint32_t count, uint8_t *buffer)
2129 {
2130 int mmu_enabled = 0;
2131 int retval;
2132
2133 /* determine if MMU was enabled on target stop */
2134 retval = aarch64_mmu(target, &mmu_enabled);
2135 if (retval != ERROR_OK)
2136 return retval;
2137
2138 if (mmu_enabled) {
2139 /* enable MMU as we could have disabled it for phys access */
2140 retval = aarch64_mmu_modify(target, 1);
2141 if (retval != ERROR_OK)
2142 return retval;
2143 }
2144 return aarch64_read_cpu_memory(target, address, size, count, buffer);
2145 }
2146
2147 static int aarch64_write_phys_memory(struct target *target,
2148 target_addr_t address, uint32_t size,
2149 uint32_t count, const uint8_t *buffer)
2150 {
2151 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2152
2153 if (count && buffer) {
2154 /* write memory through APB-AP */
2155 retval = aarch64_mmu_modify(target, 0);
2156 if (retval != ERROR_OK)
2157 return retval;
2158 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2159 }
2160
2161 return retval;
2162 }
2163
2164 static int aarch64_write_memory(struct target *target, target_addr_t address,
2165 uint32_t size, uint32_t count, const uint8_t *buffer)
2166 {
2167 int mmu_enabled = 0;
2168 int retval;
2169
2170 /* determine if MMU was enabled on target stop */
2171 retval = aarch64_mmu(target, &mmu_enabled);
2172 if (retval != ERROR_OK)
2173 return retval;
2174
2175 if (mmu_enabled) {
2176 /* enable MMU as we could have disabled it for phys access */
2177 retval = aarch64_mmu_modify(target, 1);
2178 if (retval != ERROR_OK)
2179 return retval;
2180 }
2181 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2182 }
2183
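/*
 * Periodic timer callback: while the target runs, drain the
 * target-to-host debug message channel. Each read of DTRTX while
 * DSCR_DTR_TX_FULL is set delivers one 32-bit request word to
 * target_request().
 */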
2184 static int aarch64_handle_target_request(void *priv)
2185 {
2186 struct target *target = priv;
2187 struct armv8_common *armv8 = target_to_armv8(target);
2188 int retval;
2189
2190 if (!target_was_examined(target))
2191 return ERROR_OK;
2192 if (!target->dbg_msg_enabled)
2193 return ERROR_OK;
2194
2195 if (target->state == TARGET_RUNNING) {
2196 uint32_t request;
2197 uint32_t dscr;
2198 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2199 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2200
2201 /* check if we have data */
2202 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2203 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2204 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2205 if (retval == ERROR_OK) {
2206 target_request(target, request);
2207 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2208 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2209 }
2210 }
2211 }
2212
2213 return ERROR_OK;
2214 }
2215
2216 static int aarch64_examine_first(struct target *target)
2217 {
2218 struct aarch64_common *aarch64 = target_to_aarch64(target);
2219 struct armv8_common *armv8 = &aarch64->armv8_common;
2220 struct adiv5_dap *swjdp = armv8->arm.dap;
2221 struct aarch64_private_config *pc;
2222 int i;
2223 int retval = ERROR_OK;
2224 uint64_t debug, ttypr;
2225 uint32_t cpuid;
2226 uint32_t tmp0, tmp1, tmp2, tmp3;
2227 debug = ttypr = cpuid = 0;
2228
2229 	/* Search for the APB-AP - it is needed for access to debug registers */
2230 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2231 if (retval != ERROR_OK) {
2232 LOG_ERROR("Could not find APB-AP for debug access");
2233 return retval;
2234 }
2235
2236 retval = mem_ap_init(armv8->debug_ap);
2237 if (retval != ERROR_OK) {
2238 LOG_ERROR("Could not initialize the APB-AP");
2239 return retval;
2240 }
2241
2242 armv8->debug_ap->memaccess_tck = 10;
2243
2244 if (!target->dbgbase_set) {
2245 uint32_t dbgbase;
2246 /* Get ROM Table base */
2247 uint32_t apid;
2248 int32_t coreidx = target->coreid;
2249 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2250 if (retval != ERROR_OK)
2251 return retval;
2252 /* Lookup 0x15 -- Processor DAP */
2253 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2254 &armv8->debug_base, &coreidx);
2255 if (retval != ERROR_OK)
2256 return retval;
2257 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
2258 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2259 } else
2260 armv8->debug_base = target->dbgbase;
2261
2262 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2263 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2264 if (retval != ERROR_OK) {
2265 LOG_DEBUG("Examine %s failed", "oslock");
2266 return retval;
2267 }
2268
2269 retval = mem_ap_read_u32(armv8->debug_ap,
2270 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2271 if (retval != ERROR_OK) {
2272 LOG_DEBUG("Examine %s failed", "CPUID");
2273 return retval;
2274 }
2275
2276 retval = mem_ap_read_u32(armv8->debug_ap,
2277 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2278 retval += mem_ap_read_u32(armv8->debug_ap,
2279 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2280 if (retval != ERROR_OK) {
2281 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2282 return retval;
2283 }
2284 retval = mem_ap_read_u32(armv8->debug_ap,
2285 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2286 retval += mem_ap_read_u32(armv8->debug_ap,
2287 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2288 if (retval != ERROR_OK) {
2289 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2290 return retval;
2291 }
2292
2293 retval = dap_run(armv8->debug_ap->dap);
2294 if (retval != ERROR_OK) {
2295 		LOG_ERROR("%s: examination failed", target_name(target));
2296 return retval;
2297 }
2298
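	/* Assemble the 64-bit ID values from the two 32-bit halves read
	 * above: 'debug' is ID_AA64DFR0_EL1, 'ttypr' the memory model
	 * feature register (presumably ID_AA64MMFR0_EL1). */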
2299 ttypr |= tmp1;
2300 ttypr = (ttypr << 32) | tmp0;
2301 debug |= tmp3;
2302 debug = (debug << 32) | tmp2;
2303
2304 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2305 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2306 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2307
2308 if (target->private_config == NULL)
2309 return ERROR_FAIL;
2310
2311 pc = (struct aarch64_private_config *)target->private_config;
2312 if (pc->cti == NULL)
2313 return ERROR_FAIL;
2314
2315 armv8->cti = pc->cti;
2316
2317 retval = aarch64_dpm_setup(aarch64, debug);
2318 if (retval != ERROR_OK)
2319 return retval;
2320
2321 /* Setup Breakpoint Register Pairs */
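	/* ID_AA64DFR0_EL1 encodes the number of breakpoint pairs minus one
	 * in BRPs (bits [15:12]) and the number of context-aware pairs,
	 * again minus one, in CTX_CMPs (bits [31:28]); hence the "+ 1". */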
2322 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2323 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2324 aarch64->brp_num_available = aarch64->brp_num;
2325 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2326 for (i = 0; i < aarch64->brp_num; i++) {
2327 aarch64->brp_list[i].used = 0;
2328 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2329 aarch64->brp_list[i].type = BRP_NORMAL;
2330 else
2331 aarch64->brp_list[i].type = BRP_CONTEXT;
2332 aarch64->brp_list[i].value = 0;
2333 aarch64->brp_list[i].control = 0;
2334 aarch64->brp_list[i].BRPn = i;
2335 }
2336
2337 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2338
2339 target->state = TARGET_UNKNOWN;
2340 target->debug_reason = DBG_REASON_NOTHALTED;
2341 aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2342 target_set_examined(target);
2343 return ERROR_OK;
2344 }
2345
2346 static int aarch64_examine(struct target *target)
2347 {
2348 int retval = ERROR_OK;
2349
2350 /* don't re-probe hardware after each reset */
2351 if (!target_was_examined(target))
2352 retval = aarch64_examine_first(target);
2353
2354 /* Configure core debug access */
2355 if (retval == ERROR_OK)
2356 retval = aarch64_init_debug_access(target);
2357
2358 return retval;
2359 }
2360
2361 /*
2362  * aarch64 target creation and initialization
2363 */
2364
2365 static int aarch64_init_target(struct command_context *cmd_ctx,
2366 struct target *target)
2367 {
2368 /* examine_first() does a bunch of this */
2369 arm_semihosting_init(target);
2370 return ERROR_OK;
2371 }
2372
2373 static int aarch64_init_arch_info(struct target *target,
2374 struct aarch64_common *aarch64, struct adiv5_dap *dap)
2375 {
2376 struct armv8_common *armv8 = &aarch64->armv8_common;
2377
2378 /* Setup struct aarch64_common */
2379 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2380 armv8->arm.dap = dap;
2381
2382 /* register arch-specific functions */
2383 armv8->examine_debug_reason = NULL;
2384 armv8->post_debug_entry = aarch64_post_debug_entry;
2385 armv8->pre_restore_context = NULL;
2386 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2387
2388 armv8_init_arch_info(target, armv8);
2389 target_register_timer_callback(aarch64_handle_target_request, 1,
2390 TARGET_TIMER_TYPE_PERIODIC, target);
2391
2392 return ERROR_OK;
2393 }
2394
2395 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2396 {
2397 struct aarch64_private_config *pc = target->private_config;
2398 struct aarch64_common *aarch64;
2399
2400 	if (pc == NULL || adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2401 return ERROR_FAIL;
2402
2403 aarch64 = calloc(1, sizeof(struct aarch64_common));
2404 if (aarch64 == NULL) {
2405 LOG_ERROR("Out of memory");
2406 return ERROR_FAIL;
2407 }
2408
2409 return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2410 }
2411
2412 static void aarch64_deinit_target(struct target *target)
2413 {
2414 struct aarch64_common *aarch64 = target_to_aarch64(target);
2415 struct armv8_common *armv8 = &aarch64->armv8_common;
2416 struct arm_dpm *dpm = &armv8->dpm;
2417
2418 armv8_free_reg_cache(target);
2419 free(aarch64->brp_list);
2420 free(dpm->dbp);
2421 free(dpm->dwp);
2422 free(target->private_config);
2423 free(aarch64);
2424 }
2425
2426 static int aarch64_mmu(struct target *target, int *enabled)
2427 {
2428 if (target->state != TARGET_HALTED) {
2429 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2430 return ERROR_TARGET_INVALID;
2431 }
2432
2433 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2434 return ERROR_OK;
2435 }
2436
2437 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2438 target_addr_t *phys)
2439 {
2440 return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2441 }
2442
2443 /*
2444 * private target configuration items
2445 */
2446 enum aarch64_cfg_param {
2447 CFG_CTI,
2448 };
2449
2450 static const Jim_Nvp nvp_config_opts[] = {
2451 { .name = "-cti", .value = CFG_CTI },
2452 { .name = NULL, .value = -1 }
2453 };
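/*
 * "-cti" binds the target to a previously created CTI object. Typical
 * config-script usage (instance names and base address are illustrative
 * only):
 *
 *   cti create $_CHIPNAME.cti -dap $_CHIPNAME.dap -ap-num 0 -ctibase 0x82001000
 *   target create $_CHIPNAME.a53 aarch64 -dap $_CHIPNAME.dap -cti $_CHIPNAME.cti
 */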
2454
2455 static int aarch64_jim_configure(struct target *target, Jim_GetOptInfo *goi)
2456 {
2457 struct aarch64_private_config *pc;
2458 Jim_Nvp *n;
2459 int e;
2460
2461 pc = (struct aarch64_private_config *)target->private_config;
2462 if (pc == NULL) {
2463 pc = calloc(1, sizeof(struct aarch64_private_config));
2464 target->private_config = pc;
2465 }
2466
2467 /*
2468 * Call adiv5_jim_configure() to parse the common DAP options
2469 * It will return JIM_CONTINUE if it didn't find any known
2470 * options, JIM_OK if it correctly parsed the topmost option
2471 	 * and JIM_ERR if an error occurred during parameter evaluation.
2472 * For JIM_CONTINUE, we check our own params.
2473 */
2474 e = adiv5_jim_configure(target, goi);
2475 if (e != JIM_CONTINUE)
2476 return e;
2477
2478 /* parse config or cget options ... */
2479 if (goi->argc > 0) {
2480 Jim_SetEmptyResult(goi->interp);
2481
2482 /* check first if topmost item is for us */
2483 e = Jim_Nvp_name2value_obj(goi->interp, nvp_config_opts,
2484 goi->argv[0], &n);
2485 if (e != JIM_OK)
2486 return JIM_CONTINUE;
2487
2488 e = Jim_GetOpt_Obj(goi, NULL);
2489 if (e != JIM_OK)
2490 return e;
2491
2492 switch (n->value) {
2493 case CFG_CTI: {
2494 if (goi->isconfigure) {
2495 Jim_Obj *o_cti;
2496 struct arm_cti *cti;
2497 e = Jim_GetOpt_Obj(goi, &o_cti);
2498 if (e != JIM_OK)
2499 return e;
2500 cti = cti_instance_by_jim_obj(goi->interp, o_cti);
2501 if (cti == NULL) {
2502 Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
2503 return JIM_ERR;
2504 }
2505 pc->cti = cti;
2506 } else {
2507 if (goi->argc != 0) {
2508 Jim_WrongNumArgs(goi->interp,
2509 goi->argc, goi->argv,
2510 "NO PARAMS");
2511 return JIM_ERR;
2512 }
2513
2514 if (pc == NULL || pc->cti == NULL) {
2515 Jim_SetResultString(goi->interp, "CTI not configured", -1);
2516 return JIM_ERR;
2517 }
2518 Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
2519 }
2520 break;
2521 }
2522
2523 default:
2524 return JIM_CONTINUE;
2525 }
2526 }
2527
2528 return JIM_OK;
2529 }
2530
2531 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2532 {
2533 struct target *target = get_current_target(CMD_CTX);
2534 struct armv8_common *armv8 = target_to_armv8(target);
2535
2536 return armv8_handle_cache_info_command(CMD,
2537 &armv8->armv8_mmu.armv8_cache);
2538 }
2539
2540
2541 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2542 {
2543 struct target *target = get_current_target(CMD_CTX);
2544 if (!target_was_examined(target)) {
2545 LOG_ERROR("target not examined yet");
2546 return ERROR_FAIL;
2547 }
2548
2549 return aarch64_init_debug_access(target);
2550 }
2551
2552 COMMAND_HANDLER(aarch64_mask_interrupts_command)
2553 {
2554 struct target *target = get_current_target(CMD_CTX);
2555 struct aarch64_common *aarch64 = target_to_aarch64(target);
2556
2557 static const Jim_Nvp nvp_maskisr_modes[] = {
2558 { .name = "off", .value = AARCH64_ISRMASK_OFF },
2559 { .name = "on", .value = AARCH64_ISRMASK_ON },
2560 { .name = NULL, .value = -1 },
2561 };
2562 const Jim_Nvp *n;
2563
2564 if (CMD_ARGC > 0) {
2565 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2566 if (n->name == NULL) {
2567 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2568 return ERROR_COMMAND_SYNTAX_ERROR;
2569 }
2570
2571 aarch64->isrmasking_mode = n->value;
2572 }
2573
2574 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
2575 command_print(CMD, "aarch64 interrupt mask %s", n->name);
2576
2577 return ERROR_OK;
2578 }
2579
2580 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
2581 {
2582 struct command_context *context;
2583 struct target *target;
2584 struct arm *arm;
2585 int retval;
2586 bool is_mcr = false;
2587 int arg_cnt = 0;
2588
2589 if (Jim_CompareStringImmediate(interp, argv[0], "mcr")) {
2590 is_mcr = true;
2591 arg_cnt = 7;
2592 } else {
2593 arg_cnt = 6;
2594 }
2595
2596 context = current_command_context(interp);
2597 assert(context != NULL);
2598
2599 target = get_current_target(context);
2600 if (target == NULL) {
2601 LOG_ERROR("%s: no current target", __func__);
2602 return JIM_ERR;
2603 }
2604 if (!target_was_examined(target)) {
2605 LOG_ERROR("%s: not yet examined", target_name(target));
2606 return JIM_ERR;
2607 }
2608
2609 arm = target_to_arm(target);
2610 if (!is_arm(arm)) {
2611 LOG_ERROR("%s: not an ARM", target_name(target));
2612 return JIM_ERR;
2613 }
2614
2615 if (target->state != TARGET_HALTED)
2616 		return JIM_ERR;
2617
2618 if (arm->core_state == ARM_STATE_AARCH64) {
2619 		LOG_ERROR("%s: not a 32-bit ARM target", target_name(target));
2620 return JIM_ERR;
2621 }
2622
2623 if (argc != arg_cnt) {
2624 LOG_ERROR("%s: wrong number of arguments", __func__);
2625 return JIM_ERR;
2626 }
2627
2628 int cpnum;
2629 uint32_t op1;
2630 uint32_t op2;
2631 uint32_t CRn;
2632 uint32_t CRm;
2633 uint32_t value;
2634 long l;
2635
2636 /* NOTE: parameter sequence matches ARM instruction set usage:
2637 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
2638 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
2639 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
2640 */
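	/* Examples (AArch32 state only; the written value is illustrative):
	 *   aarch64 mrc 15 0 0 0 0       -> read MIDR  (p15, 0, c0, c0, 0)
	 *   aarch64 mcr 15 0 1 0 1 0x40  -> write ACTLR (p15, 0, c1, c0, 1)
	 */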
2641 retval = Jim_GetLong(interp, argv[1], &l);
2642 if (retval != JIM_OK)
2643 return retval;
2644 if (l & ~0xf) {
2645 LOG_ERROR("%s: %s %d out of range", __func__,
2646 "coprocessor", (int) l);
2647 return JIM_ERR;
2648 }
2649 cpnum = l;
2650
2651 retval = Jim_GetLong(interp, argv[2], &l);
2652 if (retval != JIM_OK)
2653 return retval;
2654 if (l & ~0x7) {
2655 LOG_ERROR("%s: %s %d out of range", __func__,
2656 "op1", (int) l);
2657 return JIM_ERR;
2658 }
2659 op1 = l;
2660
2661 retval = Jim_GetLong(interp, argv[3], &l);
2662 if (retval != JIM_OK)
2663 return retval;
2664 if (l & ~0xf) {
2665 LOG_ERROR("%s: %s %d out of range", __func__,
2666 "CRn", (int) l);
2667 return JIM_ERR;
2668 }
2669 CRn = l;
2670
2671 retval = Jim_GetLong(interp, argv[4], &l);
2672 if (retval != JIM_OK)
2673 return retval;
2674 if (l & ~0xf) {
2675 LOG_ERROR("%s: %s %d out of range", __func__,
2676 "CRm", (int) l);
2677 return JIM_ERR;
2678 }
2679 CRm = l;
2680
2681 retval = Jim_GetLong(interp, argv[5], &l);
2682 if (retval != JIM_OK)
2683 return retval;
2684 if (l & ~0x7) {
2685 LOG_ERROR("%s: %s %d out of range", __func__,
2686 "op2", (int) l);
2687 return JIM_ERR;
2688 }
2689 op2 = l;
2690
2691 value = 0;
2692
2693 if (is_mcr == true) {
2694 retval = Jim_GetLong(interp, argv[6], &l);
2695 if (retval != JIM_OK)
2696 return retval;
2697 value = l;
2698
2699 /* NOTE: parameters reordered! */
2700 /* ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2) */
2701 retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
2702 if (retval != ERROR_OK)
2703 return JIM_ERR;
2704 } else {
2705 /* NOTE: parameters reordered! */
2706 /* ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2) */
2707 retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
2708 if (retval != ERROR_OK)
2709 return JIM_ERR;
2710
2711 Jim_SetResult(interp, Jim_NewIntObj(interp, value));
2712 }
2713
2714 return JIM_OK;
2715 }
2716
2717 static const struct command_registration aarch64_exec_command_handlers[] = {
2718 {
2719 .name = "cache_info",
2720 .handler = aarch64_handle_cache_info_command,
2721 .mode = COMMAND_EXEC,
2722 .help = "display information about target caches",
2723 .usage = "",
2724 },
2725 {
2726 .name = "dbginit",
2727 .handler = aarch64_handle_dbginit_command,
2728 .mode = COMMAND_EXEC,
2729 .help = "Initialize core debug",
2730 .usage = "",
2731 },
2732 {
2733 .name = "maskisr",
2734 .handler = aarch64_mask_interrupts_command,
2735 .mode = COMMAND_ANY,
2736 .help = "mask aarch64 interrupts during single-step",
2737 .usage = "['on'|'off']",
2738 },
2739 {
2740 .name = "mcr",
2741 .mode = COMMAND_EXEC,
2742 .jim_handler = jim_mcrmrc,
2743 .help = "write coprocessor register",
2744 .usage = "cpnum op1 CRn CRm op2 value",
2745 },
2746 {
2747 .name = "mrc",
2748 .mode = COMMAND_EXEC,
2749 .jim_handler = jim_mcrmrc,
2750 .help = "read coprocessor register",
2751 .usage = "cpnum op1 CRn CRm op2",
2752 },
2753 {
2754 .chain = smp_command_handlers,
2755 },
2758 COMMAND_REGISTRATION_DONE
2759 };
2760
2761 static const struct command_registration aarch64_command_handlers[] = {
2762 {
2763 .chain = armv8_command_handlers,
2764 },
2765 {
2766 .name = "aarch64",
2767 .mode = COMMAND_ANY,
2768 		.help = "AArch64 command group",
2769 .usage = "",
2770 .chain = aarch64_exec_command_handlers,
2771 },
2772 COMMAND_REGISTRATION_DONE
2773 };
2774
2775 struct target_type aarch64_target = {
2776 .name = "aarch64",
2777
2778 .poll = aarch64_poll,
2779 .arch_state = armv8_arch_state,
2780
2781 .halt = aarch64_halt,
2782 .resume = aarch64_resume,
2783 .step = aarch64_step,
2784
2785 .assert_reset = aarch64_assert_reset,
2786 .deassert_reset = aarch64_deassert_reset,
2787
2788 /* REVISIT allow exporting VFP3 registers ... */
2789 .get_gdb_arch = armv8_get_gdb_arch,
2790 .get_gdb_reg_list = armv8_get_gdb_reg_list,
2791
2792 .read_memory = aarch64_read_memory,
2793 .write_memory = aarch64_write_memory,
2794
2795 .add_breakpoint = aarch64_add_breakpoint,
2796 .add_context_breakpoint = aarch64_add_context_breakpoint,
2797 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2798 .remove_breakpoint = aarch64_remove_breakpoint,
2799 .add_watchpoint = NULL,
2800 .remove_watchpoint = NULL,
2801
2802 .commands = aarch64_command_handlers,
2803 .target_create = aarch64_target_create,
2804 .target_jim_configure = aarch64_jim_configure,
2805 .init_target = aarch64_init_target,
2806 .deinit_target = aarch64_deinit_target,
2807 .examine = aarch64_examine,
2808
2809 .read_phys_memory = aarch64_read_phys_memory,
2810 .write_phys_memory = aarch64_write_phys_memory,
2811 .mmu = aarch64_mmu,
2812 .virt2phys = aarch64_virt2phys,
2813 };