/***************************************************************************
 *   Copyright (C) 2015 by David Ung                                       *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.          *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "breakpoints.h"
#include "aarch64.h"
#include "register.h"
#include "target_request.h"
#include "target_type.h"
#include "armv8_opcodes.h"
#include "armv8_cache.h"
#include "arm_semihosting.h"
#include "jtag/interface.h"
#include "smp.h"
#include <helper/time_support.h>

enum restart_mode {
	RESTART_LAZY,
	RESTART_SYNC,
};

enum halt_mode {
	HALT_LAZY,
	HALT_SYNC,
};

struct aarch64_private_config {
	struct adiv5_private_config adiv5_config;
	struct arm_cti *cti;
};

static int aarch64_poll(struct target *target);
static int aarch64_debug_entry(struct target *target);
static int aarch64_restore_context(struct target *target, bool bpwp);
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_mmu(struct target *target, int *enabled);
static int aarch64_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int aarch64_read_cpu_memory(struct target *target,
	uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);

static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
		case ARM_MODE_HYP:
		case ARM_MODE_SYS:
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			LOG_ERROR("cannot write system control register in this mode: (%s : 0x%x)",
				armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
			return ERROR_FAIL;
		}

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}
/* Modify system_control_reg to enable or disable the MMU for:
 * - virt2phys address conversion
 * - reading or writing memory at a physical or virtual address */
static int aarch64_mmu_modify(struct target *target, int enable)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval = ERROR_OK;
	uint32_t instr = 0;

	if (enable) {
		/* the MMU may only be re-enabled if it was enabled when the target stopped */
		if (!(aarch64->system_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable MMU on a target stopped with MMU disabled");
			return ERROR_FAIL;
		}
		if (!(aarch64->system_control_reg_curr & 0x1U))
			aarch64->system_control_reg_curr |= 0x1U;
	} else {
		if (aarch64->system_control_reg_curr & 0x4U) {
			/* data cache is active */
			aarch64->system_control_reg_curr &= ~0x4U;
			/* call the armv8 function to flush the data cache */
			if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
				armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
		}
		if ((aarch64->system_control_reg_curr & 0x1U)) {
			aarch64->system_control_reg_curr &= ~0x1U;
		}
	}

	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_SYS:
		instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_mode);
		break;
	}

	retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
			aarch64->system_control_reg_curr);
	return retval;
}
/*
 * Basic debug access; very low level, assumes state is saved
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG("%s", target_name(target));

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = arm_cti_enable(armv8->cti, true);
	/* By default, gate all channel events to and from the CTM */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* output halt requests to PE on channel 0 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	return ERROR_OK;
}
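
/*
 * Illustrative sketch (not part of the original code): with the static
 * CTI routing above, halting and restarting a PE reduces to pulsing the
 * corresponding CTI channel, e.g.
 *
 *     arm_cti_ungate_channel(armv8->cti, 0);  // let channel 0 events into the CTM
 *     arm_cti_pulse_channel(armv8->cti, 0);   // raise a halt request
 *     arm_cti_pulse_channel(armv8->cti, 1);   // raise a restart request
 *
 * aarch64_halt_one() and aarch64_do_restart_one() below implement this
 * pattern, adding the required DSCR.HDE setup and event acknowledgment.
 */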

/* Write to memory mapped registers directly with no cache or mmu handling */
static int aarch64_dap_write_memap_register_u32(struct target *target,
	uint32_t address,
	uint32_t value)
{
	int retval;
	struct armv8_common *armv8 = target_to_armv8(target);

	retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);

	return retval;
}

static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
{
	struct arm_dpm *dpm = &a8->armv8_common.dpm;
	int retval;

	dpm->arm = &a8->armv8_common.arm;
	dpm->didr = debug;

	retval = armv8_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = armv8_dpm_initialize(dpm);

	return retval;
}

static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
}

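/*
 * Descriptive note (added): read CPUV8_DBG_PRSR and compare it, under
 * 'mask', against 'val'. The raw PRSR value is returned through p_prsr
 * and the comparison result through p_result; either pointer may be
 * NULL. Typical use, as in aarch64_wait_halt_one() below:
 *
 *     int halted;
 *     retval = aarch64_check_state_one(target,
 *             PRSR_HALT, PRSR_HALT, &halted, NULL);
 */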
static int aarch64_check_state_one(struct target *target,
	uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	uint32_t prsr;
	int retval;

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
	if (retval != ERROR_OK)
		return retval;

	if (p_prsr)
		*p_prsr = prsr;

	if (p_result)
		*p_result = (prsr & mask) == (val & mask);

	return ERROR_OK;
}

static int aarch64_wait_halt_one(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t prsr;

	int64_t then = timeval_ms();
	for (;;) {
		int halted;

		retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
		if (retval != ERROR_OK || halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
			break;
		}
	}
	return retval;
}

static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head = target->head;
	struct target *first = NULL;

	LOG_DEBUG("target %s exc %i", target_name(target), exc_target);

	while (head != NULL) {
		struct target *curr = head->target;
		struct armv8_common *armv8 = target_to_armv8(curr);
		head = head->next;

		if (exc_target && curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_RUNNING)
			continue;

		/* HACK: mark this target as prepared for halting */
		curr->debug_reason = DBG_REASON_DBGRQ;

		/* open the gate for channel 0 to let HALT requests pass to the CTM */
		retval = arm_cti_ungate_channel(armv8->cti, 0);
		if (retval == ERROR_OK)
			retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
		if (retval != ERROR_OK)
			break;

		LOG_DEBUG("target %s prepared", target_name(curr));

		if (first == NULL)
			first = curr;
	}

	if (p_first) {
		if (exc_target && first)
			*p_first = first;
		else
			*p_first = target;
	}

	return retval;
}

static int aarch64_halt_one(struct target *target, enum halt_mode mode)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG("%s", target_name(target));

	/* allow Halting Debug Mode */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = arm_cti_pulse_channel(armv8->cti, 0);
	if (retval != ERROR_OK)
		return retval;

	if (mode == HALT_SYNC) {
		retval = aarch64_wait_halt_one(target);
		if (retval != ERROR_OK) {
			if (retval == ERROR_TARGET_TIMEOUT)
				LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
			return retval;
		}
	}

	return ERROR_OK;
}

static int aarch64_halt_smp(struct target *target, bool exc_target)
{
	struct target *next = target;
	int retval;

	/* prepare halt on all PEs of the group */
	retval = aarch64_prepare_halt_smp(target, exc_target, &next);

	if (exc_target && next == target)
		return retval;

	/* halt the target PE */
	if (retval == ERROR_OK)
		retval = aarch64_halt_one(next, HALT_LAZY);

	if (retval != ERROR_OK)
		return retval;

	/* wait for all PEs to halt */
	int64_t then = timeval_ms();
	for (;;) {
		bool all_halted = true;
		struct target_list *head;
		struct target *curr;

		foreach_smp_target(head, target->head) {
			int halted;

			curr = head->target;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
			if (retval != ERROR_OK || !halted) {
				all_halted = false;
				break;
			}
		}

		if (all_halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}

		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTIs are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit halt for the second cluster.
		 */
		retval = aarch64_halt_one(curr, HALT_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}

static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;

	if (debug_reason == DBG_REASON_NOTHALTED) {
		LOG_DEBUG("Halting remaining targets in SMP group");
		aarch64_halt_smp(target, true);
	}

	/* poll all targets in the group, but skip the target that serves GDB */
	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* remember the gdb_service->target */
		if (curr->gdb_service != NULL)
			gdb_target = curr->gdb_service->target;
		/* skip it */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in aarch64_poll() */
		curr->smp = 0;
		aarch64_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target != NULL && gdb_target != target)
		aarch64_poll(gdb_target);

	return ERROR_OK;
}

/*
 * AArch64 run control
 */

static int aarch64_poll(struct target *target)
{
	enum target_state prev_target_state;
	int retval = ERROR_OK;
	int halted;

	retval = aarch64_check_state_one(target,
			PRSR_HALT, PRSR_HALT, &halted, NULL);
	if (retval != ERROR_OK)
		return retval;

	if (halted) {
		prev_target_state = target->state;
		if (prev_target_state != TARGET_HALTED) {
			enum target_debug_reason debug_reason = target->debug_reason;

			/* We have a halting debug event */
			target->state = TARGET_HALTED;
			LOG_DEBUG("Target %s halted", target_name(target));
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp)
				update_halt_gdb(target, debug_reason);

			if (arm_semihosting(target, &retval) != 0)
				return retval;

			switch (prev_target_state) {
			case TARGET_RUNNING:
			case TARGET_UNKNOWN:
			case TARGET_RESET:
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
				break;
			case TARGET_DEBUG_RUNNING:
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
				break;
			default:
				break;
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}

static int aarch64_halt(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	armv8->last_run_control_op = ARMV8_RUNCONTROL_HALT;

	if (target->smp)
		return aarch64_halt_smp(target, false);

	return aarch64_halt_one(target, HALT_SYNC);
}

static int aarch64_restore_one(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	LOG_DEBUG("%s", target_name(target));

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the GDB thumb fixup does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = true;
	arm->pc->valid = true;

	/* restore the system control register before restoring the context,
	 * because doing so uses CPU register r0 */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}

/**
 * prepare single target for restart
 */
static int aarch64_prepare_restart_one(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dscr;
	uint32_t tmp;

	LOG_DEBUG("%s", target_name(target));

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* acknowledge a pending CTI halt event */
	retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs. Also close gate for channel 0
	 * to isolate the PE from halt events.
	 */
	if (retval == ERROR_OK)
		retval = arm_cti_ungate_channel(armv8->cti, 1);
	if (retval == ERROR_OK)
		retval = arm_cti_gate_channel(armv8->cti, 0);

	/* make sure that DSCR.HDE is set */
	if (retval == ERROR_OK) {
		dscr |= DSCR_HDE;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval == ERROR_OK) {
		/* clear sticky bits in PRSR, SDR is now 0 */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
	}

	return retval;
}

static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;

	LOG_DEBUG("%s", target_name(target));

	/* trigger an event on channel 1, generates a restart request to the PE */
	retval = arm_cti_pulse_channel(armv8->cti, 1);
	if (retval != ERROR_OK)
		return retval;

	if (mode == RESTART_SYNC) {
		int64_t then = timeval_ms();
		for (;;) {
			int resumed;
			/*
			 * if PRSR.SDR is set now, the target did restart, even
			 * if it's now already halted again (e.g. due to breakpoint)
			 */
			retval = aarch64_check_state_one(target,
					PRSR_SDR, PRSR_SDR, &resumed, NULL);
			if (retval != ERROR_OK || resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: Timeout waiting for resume", target_name(target));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	return ERROR_OK;
}

static int aarch64_restart_one(struct target *target, enum restart_mode mode)
{
	int retval;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prepare_restart_one(target);
	if (retval == ERROR_OK)
		retval = aarch64_do_restart_one(target, mode);

	return retval;
}
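
/*
 * Note (added): the prepare/do split above is what makes SMP group
 * restarts work. aarch64_prep_restart_smp() below runs the prepare step
 * on every PE, opening each PE's channel 1 gate, so a single channel 1
 * pulse issued by aarch64_do_restart_one() on one PE is distributed
 * through the CTM and restarts the whole group at once.
 */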

/*
 * prepare all but the current target for restart
 */
static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;
	uint64_t address;

	foreach_smp_target(head, target->head) {
		struct target *curr = head->target;

		/* skip calling target */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_HALTED)
			continue;

		/* resume at current address, not in step mode */
		retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
		if (retval == ERROR_OK)
			retval = aarch64_prepare_restart_one(curr);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to restore target %s", target_name(curr));
			break;
		}
		/* remember the first valid target in the group */
		if (first == NULL)
			first = curr;
	}

	if (p_first)
		*p_first = first;

	return retval;
}

static int aarch64_step_restart_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prep_restart_smp(target, 0, &first);
	if (retval != ERROR_OK)
		return retval;

	if (first != NULL)
		retval = aarch64_do_restart_one(first, RESTART_LAZY);
	if (retval != ERROR_OK) {
		LOG_DEBUG("error restarting target %s", target_name(first));
		return retval;
	}

	int64_t then = timeval_ms();
	for (;;) {
		struct target *curr = target;
		bool all_resumed = true;

		foreach_smp_target(head, target->head) {
			uint32_t prsr;
			int resumed;

			curr = head->target;

			if (curr == target)
				continue;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr,
					PRSR_SDR, PRSR_SDR, &resumed, &prsr);
			if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
				all_resumed = false;
				break;
			}

			if (curr->state != TARGET_RUNNING) {
				curr->state = TARGET_RUNNING;
				curr->debug_reason = DBG_REASON_NOTHALTED;
				target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
			}
		}

		if (all_resumed)
			break;

		if (timeval_ms() > then + 1000) {
			LOG_ERROR("%s: timeout waiting for target resume", __func__);
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTIs are not connected by a common
		 * trigger matrix. It seems that we need to restart one core in each
		 * cluster explicitly. So if we find that a core has not resumed
		 * yet, we trigger an explicit resume for the second cluster.
		 */
		retval = aarch64_do_restart_one(curr, RESTART_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}

static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	struct armv8_common *armv8 = target_to_armv8(target);
	armv8->last_run_control_op = ARMV8_RUNCONTROL_RESUME;

	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	/*
	 * If this target is part of a SMP group, prepare the other
	 * targets for resuming. This involves restoring the complete
	 * target register context and setting up CTI gates to accept
	 * resume events from the trigger matrix.
	 */
	if (target->smp) {
		retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
		if (retval != ERROR_OK)
			return retval;
	}

	/* all targets prepared, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
		debug_execution);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_SYNC);
	if (retval != ERROR_OK)
		return retval;

	if (target->smp) {
		int64_t then = timeval_ms();
		for (;;) {
			struct target *curr = target;
			struct target_list *head;
			bool all_resumed = true;

			foreach_smp_target(head, target->head) {
				uint32_t prsr;
				int resumed;

				curr = head->target;
				if (curr == target)
					continue;
				if (!target_was_examined(curr))
					continue;

				retval = aarch64_check_state_one(curr,
						PRSR_SDR, PRSR_SDR, &resumed, &prsr);
				if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
					all_resumed = false;
					break;
				}

				if (curr->state != TARGET_RUNNING) {
					curr->state = TARGET_RUNNING;
					curr->debug_reason = DBG_REASON_NOTHALTED;
					target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
				}
			}

			if (all_resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}

			/*
			 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
			 * and it looks like the CTIs are not connected by a common
			 * trigger matrix. It seems that we need to restart one core in each
			 * cluster explicitly. So if we find that a core has not resumed
			 * yet, we trigger an explicit resume for the second cluster.
			 */
			retval = aarch64_do_restart_one(curr, RESTART_LAZY);
			if (retval != ERROR_OK)
				break;
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}

static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	uint32_t dscr;

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval == ERROR_OK)
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);

	dpm->dscr = dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* close the CTI gate for all events */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, dscr);

	/* save the address of the instruction that triggered the watchpoint */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR1,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}

static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_SYS:
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
			armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	armv8->armv8_mmu.mmu_enabled =
		(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
		(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
		(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	return ERROR_OK;
}
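
/*
 * For reference (ARMv8-A SCTLR_ELx bit assignments, per the Arm ARM),
 * the masks 0x1, 0x4 and 0x1000 used above correspond to:
 *
 *     SCTLR.M (bit 0)  - MMU enable               -> armv8_mmu.mmu_enabled
 *     SCTLR.C (bit 2)  - data cache enable        -> d_u_cache_enabled
 *     SCTLR.I (bit 12) - instruction cache enable -> i_cache_enabled
 */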

/*
 * single-step a target
 */
static int aarch64_step(struct target *target, int current, target_addr_t address,
	int handle_breakpoints)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	int saved_retval = ERROR_OK;
	int retval;
	uint32_t edecr;

	armv8->last_run_control_op = ARMV8_RUNCONTROL_STEP;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
	/* make sure EDECR.SS is not set when restoring the register */

	if (retval == ERROR_OK) {
		edecr &= ~0x4;
		/* set EDECR.SS to enter hardware step mode */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
	}
	/* disable interrupts while stepping */
	if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
		retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
	/* bail out if stepping setup has failed */
	if (retval != ERROR_OK)
		return retval;

	if (target->smp && (current == 1)) {
		/*
		 * isolate current target so that it doesn't get resumed
		 * together with the others
		 */
		retval = arm_cti_gate_channel(armv8->cti, 1);
		/* resume all other targets in the group */
		if (retval == ERROR_OK)
			retval = aarch64_step_restart_smp(target);
		if (retval != ERROR_OK) {
			LOG_ERROR("Failed to restart non-stepping targets in SMP group");
			return retval;
		}
		LOG_DEBUG("Restarted all non-stepping targets in SMP group");
	}

	/* all other targets running, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &address, 0, 0);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_LAZY);

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
	if (!handle_breakpoints)
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	int64_t then = timeval_ms();
	for (;;) {
		int stepped;
		uint32_t prsr;

		retval = aarch64_check_state_one(target,
				PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
		if (retval != ERROR_OK || stepped)
			break;

		if (timeval_ms() > then + 100) {
			LOG_ERROR("timeout waiting for target %s halt after step",
				target_name(target));
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
	}

	/*
	 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
	 * causes a timeout. The core takes the step but doesn't complete it and so
	 * debug state is never entered. However, you can manually halt the core
	 * as an external debug event is also a WFI wakeup event.
	 */
	if (retval == ERROR_TARGET_TIMEOUT)
		saved_retval = aarch64_halt_one(target, HALT_SYNC);

	/* restore EDECR */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, edecr);
	if (retval != ERROR_OK)
		return retval;

	/* restore interrupts */
	if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
		retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
		if (retval != ERROR_OK)
			return retval;
	}

	if (saved_retval != ERROR_OK)
		return saved_retval;

	return ERROR_OK;
}

static int aarch64_restore_context(struct target *target, bool bpwp)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;

	int retval;

	LOG_DEBUG("%s", target_name(target));

	if (armv8->pre_restore_context)
		armv8->pre_restore_context(target);

	retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
	if (retval == ERROR_OK) {
		/* registers are now invalid */
		register_cache_invalidate(arm->core_cache);
		register_cache_invalidate(arm->core_cache->next);
	}

	return retval;
}

/*
 * AArch64 breakpoint and watchpoint functions
 */

/* Setup hardware Breakpoint Register Pair */
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		int64_t bpt_value;
		while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
			brp_i++;
		if (brp_i >= aarch64->brp_num) {
			LOG_ERROR("cannot find free Breakpoint Register Pair");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
		breakpoint->set = brp_i + 1;
		if (breakpoint->length == 2)
			byte_addr_select = (3 << (breakpoint->address & 0x02));
		control = ((matchmode & 0x7) << 20)
			| (1 << 13)
			| (byte_addr_select << 5)
			| (3 << 1) | 1;
		brp_list[brp_i].used = 1;
		brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
		brp_list[brp_i].control = control;
		bpt_value = brp_list[brp_i].value;

		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
				(uint32_t)(bpt_value & 0xFFFFFFFF));
		if (retval != ERROR_OK)
			return retval;
		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
				(uint32_t)(bpt_value >> 32));
		if (retval != ERROR_OK)
			return retval;

		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
				brp_list[brp_i].control);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
			brp_list[brp_i].control,
			brp_list[brp_i].value);

	} else if (breakpoint->type == BKPT_SOFT) {
		uint32_t opcode;
		uint8_t code[4];

		if (armv8_dpm_get_core_state(&armv8->dpm) == ARM_STATE_AARCH64) {
			opcode = ARMV8_HLT(11);

			if (breakpoint->length != 4)
				LOG_ERROR("bug: breakpoint length should be 4 in AArch64 mode");
		} else {
			/**
			 * core_state is ARM_STATE_ARM
			 * in that case the opcode depends on breakpoint length:
			 * - if length == 4 => A32 opcode
			 * - if length == 2 => T32 opcode
			 * - if length == 3 => T32 opcode (refer to gdb doc : ARM-Breakpoint-Kinds)
			 *   in that case the length should be changed from 3 to 4 bytes
			 **/
			opcode = (breakpoint->length == 4) ? ARMV8_HLT_A1(11) :
				(uint32_t) (ARMV8_HLT_T1(11) | ARMV8_HLT_T1(11) << 16);

			if (breakpoint->length == 3)
				breakpoint->length = 4;
		}

		buf_set_u32(code, 0, 32, opcode);

		retval = target_read_memory(target,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length, 1,
				breakpoint->orig_instr);
		if (retval != ERROR_OK)
			return retval;

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		retval = target_write_memory(target,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length, 1, code);
		if (retval != ERROR_OK)
			return retval;

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		breakpoint->set = 0x11;	/* Any nice value but 0 */
	}

	/* Ensure that halting debug mode is enabled */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Failed to set DSCR.HDE");
		return retval;
	}

	return ERROR_OK;
}
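
/*
 * For reference (per the ARMv8-A debug register descriptions in the Arm
 * ARM), the DBGBCR 'control' words assembled in the breakpoint functions
 * here pack the following fields:
 *
 *     BT  [23:20] - breakpoint type (the 'matchmode' argument:
 *                   0x0 unlinked address match, 0x1 linked address match,
 *                   0x2 unlinked context ID, 0x3 linked context ID)
 *     LBN [19:16] - linked breakpoint number (used by hybrid breakpoints)
 *     HMC [13]    - higher mode control, match at any exception level
 *     BAS [8:5]   - byte address select
 *     PMC [2:1]   - privilege mode control (0b11: match at EL1 and EL0)
 *     E   [0]     - breakpoint enable
 */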

static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval = ERROR_FAIL;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs */
	while ((brp_i < aarch64->brp_num) &&
		(brp_list[brp_i].used || (brp_list[brp_i].type != BRP_CONTEXT)))
		brp_i++;

	if (brp_i >= aarch64->brp_num) {
		LOG_ERROR("cannot find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint->set = brp_i + 1;
	control = ((matchmode & 0x7) << 20)
		| (1 << 13)
		| (byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_i].used = 1;
	brp_list[brp_i].value = (breakpoint->asid);
	brp_list[brp_i].control = control;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
			brp_list[brp_i].value);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
			brp_list[brp_i].control);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
		brp_list[brp_i].control,
		brp_list[brp_i].value);
	return ERROR_OK;
}

static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval = ERROR_FAIL;
	int brp_1 = 0;	/* holds the contextID pair */
	int brp_2 = 0;	/* holds the IVA pair */
	uint32_t control_CTX, control_IVA;
	uint8_t CTX_byte_addr_select = 0x0F;
	uint8_t IVA_byte_addr_select = 0x0F;
	uint8_t CTX_machmode = 0x03;
	uint8_t IVA_machmode = 0x01;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs */
	while ((brp_1 < aarch64->brp_num) &&
		(brp_list[brp_1].used || (brp_list[brp_1].type != BRP_CONTEXT)))
		brp_1++;

	LOG_DEBUG("brp(CTX) found num: %d", brp_1);
	if (brp_1 >= aarch64->brp_num) {
		LOG_ERROR("cannot find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	/* check available normal (IVA) BRPs */
	while ((brp_2 < aarch64->brp_num) &&
		(brp_list[brp_2].used || (brp_list[brp_2].type != BRP_NORMAL)))
		brp_2++;

	LOG_DEBUG("brp(IVA) found num: %d", brp_2);
	if (brp_2 >= aarch64->brp_num) {
		LOG_ERROR("cannot find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint->set = brp_1 + 1;
	breakpoint->linked_BRP = brp_2;
	control_CTX = ((CTX_machmode & 0x7) << 20)
		| (brp_2 << 16)
		| (0 << 14)
		| (CTX_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_1].used = 1;
	brp_list[brp_1].value = (breakpoint->asid);
	brp_list[brp_1].control = control_CTX;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
			brp_list[brp_1].value);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
			brp_list[brp_1].control);
	if (retval != ERROR_OK)
		return retval;

	control_IVA = ((IVA_machmode & 0x7) << 20)
		| (brp_1 << 16)
		| (1 << 13)
		| (IVA_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_2].used = 1;
	brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
	brp_list[brp_2].control = control_IVA;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].value & 0xFFFFFFFF);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].value >> 32);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].control);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}

static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}

static int aarch64_add_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_breakpoint(target, breakpoint, 0x00);	/* Exact match */
}

static int aarch64_add_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_context_breakpoint(target, breakpoint, 0x02);	/* asid match */
}

static int aarch64_add_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_hybrid_breakpoint(target, breakpoint);	/* ??? */
}

static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

#if 0
	/* It is perfectly possible to remove breakpoints while the target is running */
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}
#endif

	if (breakpoint->set) {
		aarch64_unset_breakpoint(target, breakpoint);
		if (breakpoint->type == BKPT_HARD)
			aarch64->brp_num_available++;
	}

	return ERROR_OK;
}

/*
 * AArch64 reset functions
 */

static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		adapter_assert_reset();
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target)) {
		register_cache_invalidate(armv8->arm.core_cache);
		register_cache_invalidate(armv8->arm.core_cache->next);
	}

	target->state = TARGET_RESET;

	return ERROR_OK;
}

static int aarch64_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	adapter_deassert_reset();

	if (!target_was_examined(target))
		return ERROR_OK;

	retval = aarch64_poll(target);
	if (retval != ERROR_OK)
		return retval;

	retval = aarch64_init_debug_access(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
				target_name(target));
			retval = target_halt(target);
		}
	}

	return retval;
}

static int aarch64_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode if necessary */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t data, opcode;

		/* write the data to store into DTRRX */
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		if (arm->core_state == ARM_STATE_AARCH64)
			retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}

static int aarch64_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;

	armv8_reg_current(arm, 1)->dirty = true;

	/* Step 1.d - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 2.a - Do the write */
	retval = mem_ap_write_buf_noincr(armv8->debug_ap,
			buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.a - Switch DTR mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
1833
1834 static int aarch64_write_cpu_memory(struct target *target,
1835 uint64_t address, uint32_t size,
1836 uint32_t count, const uint8_t *buffer)
1837 {
1838 /* write memory through APB-AP */
1839 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1840 struct armv8_common *armv8 = target_to_armv8(target);
1841 struct arm_dpm *dpm = &armv8->dpm;
1842 struct arm *arm = &armv8->arm;
1843 uint32_t dscr;
1844
1845 if (target->state != TARGET_HALTED) {
1846 LOG_WARNING("target not halted");
1847 return ERROR_TARGET_NOT_HALTED;
1848 }
1849
1850 /* Mark register X0 as dirty, as it will be used
1851 * for transferring the data.
1852 * It will be restored automatically when exiting
1853 * debug mode
1854 */
1855 armv8_reg_current(arm, 0)->dirty = true;
1856
1857 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1858
1859 /* Read DSCR */
1860 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1861 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1862 if (retval != ERROR_OK)
1863 return retval;
1864
1865 /* Set Normal access mode */
1866 dscr = (dscr & ~DSCR_MA);
1867 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1868 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1869 if (retval != ERROR_OK)
1870 return retval;
1871
1872 if (arm->core_state == ARM_STATE_AARCH64) {
1873 /* Write X0 with value 'address' using write procedure */
1874 /* Step 1.a+b - Write the address for the transfer into DBGDTR_EL0 */
1875 /* Step 1.c - Copy value from DTR to X0 using instruction mrs x0, DBGDTR_EL0 */
1876 retval = dpm->instr_write_data_dcc_64(dpm,
1877 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
1878 } else {
1879 /* Write R0 with value 'address' using write procedure */
1880 /* Step 1.a+b - Write the address for the transfer into DBGDTRRXint */
1881 /* Step 1.c - Copy value from DTR to R0 using instruction mrc p14, 0, r0, c0, c5, 0 */
1882 retval = dpm->instr_write_data_dcc(dpm,
1883 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
1884 }
1885
1886 if (retval != ERROR_OK)
1887 return retval;
1888
1889 if (size == 4 && (address % 4) == 0)
1890 retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
1891 else
1892 retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);
1893
1894 if (retval != ERROR_OK) {
1895 /* Unset DTR mode */
1896 mem_ap_read_atomic_u32(armv8->debug_ap,
1897 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1898 dscr &= ~DSCR_MA;
1899 mem_ap_write_atomic_u32(armv8->debug_ap,
1900 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1901 }
1902
1903 /* Check for sticky abort flags in the DSCR */
1904 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1905 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1906 if (retval != ERROR_OK)
1907 return retval;
1908
1909 dpm->dscr = dscr;
1910 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1911 /* Abort occurred - clear it and exit */
1912 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1913 armv8_dpm_handle_exception(dpm, true);
1914 return ERROR_FAIL;
1915 }
1916
1917 /* Done */
1918 return ERROR_OK;
1919 }
1920
1921 static int aarch64_read_cpu_memory_slow(struct target *target,
1922 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
1923 {
1924 struct armv8_common *armv8 = target_to_armv8(target);
1925 struct arm_dpm *dpm = &armv8->dpm;
1926 struct arm *arm = &armv8->arm;
1927 int retval;
1928
1929 armv8_reg_current(arm, 1)->dirty = true;
1930
1931 /* change DCC to normal mode (if necessary) */
1932 if (*dscr & DSCR_MA) {
1933 *dscr &= ~DSCR_MA;
1934 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1935 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1936 if (retval != ERROR_OK)
1937 return retval;
1938 }
1939
1940 while (count) {
1941 uint32_t opcode, data;
1942
1943 if (size == 1)
1944 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
1945 else if (size == 2)
1946 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
1947 else
1948 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
1949 retval = dpm->instr_execute(dpm, opcode);
1950 if (retval != ERROR_OK)
1951 return retval;
1952
1953 if (arm->core_state == ARM_STATE_AARCH64)
1954 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
1955 else
1956 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1957 if (retval != ERROR_OK)
1958 return retval;
1959
1960 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1961 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
1962 if (retval != ERROR_OK)
1963 return retval;
1964
1965 if (size == 1)
1966 *buffer = (uint8_t)data;
1967 else if (size == 2)
1968 target_buffer_set_u16(target, buffer, (uint16_t)data);
1969 else
1970 target_buffer_set_u32(target, buffer, data);
1971
1972 /* Advance */
1973 buffer += size;
1974 --count;
1975 }
1976
1977 return ERROR_OK;
1978 }
1979
1980 static int aarch64_read_cpu_memory_fast(struct target *target,
1981 uint32_t count, uint8_t *buffer, uint32_t *dscr)
1982 {
1983 struct armv8_common *armv8 = target_to_armv8(target);
1984 struct arm_dpm *dpm = &armv8->dpm;
1985 struct arm *arm = &armv8->arm;
1986 int retval;
1987 uint32_t value;
1988
1989 /* Mark X1 as dirty */
1990 armv8_reg_current(arm, 1)->dirty = true;
1991
1992 if (arm->core_state == ARM_STATE_AARCH64) {
1993 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1994 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
1995 } else {
1996 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1997 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1998 }
1999
2000 if (retval != ERROR_OK)
2001 return retval;
2002
2003 /* Step 1.e - Change DCC to memory mode */
2004 *dscr |= DSCR_MA;
2005 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2006 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2007 if (retval != ERROR_OK)
2008 return retval;
2009
2010 /* Step 1.f - read DBGDTRTX and discard the value */
2011 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2012 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2013 if (retval != ERROR_OK)
2014 return retval;
2015
2016 count--;
2017 /* Read the data - each read of the DTRTX register causes the load
2018 * instruction to be reissued and the next word to be latched.
2019 * Abort flags are sticky, so they can be checked at the end of the
2020 * transaction. The data is transferred in 32-bit aligned chunks.
2021 */
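/* Worked example of the J9.1 flow for count == 2 words at address A
 * (a sketch; the ldr below is issued implicitly by the hardware):
 *   dummy MSR  -> TXfull = 1, MA set
 *   step 1.f   -> dummy value discarded; core runs ldr w1, [x0], #4,
 *                 so DTRTX = mem[A], X0 = A + 4
 *   step 2.a   -> one buffered read returns mem[A], latches mem[A+4]
 *   step 3.a/b -> MA cleared, final read returns mem[A+4] without
 *                 triggering a further load */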
2022
2023 if (count) {
2024 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
2025 * increments X0 by 4. */
2026 retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
2027 armv8->debug_base + CPUV8_DBG_DTRTX);
2028 if (retval != ERROR_OK)
2029 return retval;
2030 }
2031
2032 /* Step 3.a - set DTR access mode back to Normal mode */
2033 *dscr &= ~DSCR_MA;
2034 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2035 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2036 if (retval != ERROR_OK)
2037 return retval;
2038
2039 /* Step 3.b - read DBGDTRTX for the final value */
2040 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2041 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2042 if (retval != ERROR_OK)
2043 return retval;
2044
2045 target_buffer_set_u32(target, buffer + count * 4, value);
2046 return retval;
2047 }
2048
2049 static int aarch64_read_cpu_memory(struct target *target,
2050 target_addr_t address, uint32_t size,
2051 uint32_t count, uint8_t *buffer)
2052 {
2053 /* read memory through APB-AP */
2054 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2055 struct armv8_common *armv8 = target_to_armv8(target);
2056 struct arm_dpm *dpm = &armv8->dpm;
2057 struct arm *arm = &armv8->arm;
2058 uint32_t dscr;
2059
2060 LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
2061 address, size, count);
2062
2063 if (target->state != TARGET_HALTED) {
2064 LOG_WARNING("target not halted");
2065 return ERROR_TARGET_NOT_HALTED;
2066 }
2067
2068 /* Mark register X0 as dirty, as it will be used
2069 * for transferring the data.
2070 * It will be restored automatically when exiting
2071 * debug mode
2072 */
2073 armv8_reg_current(arm, 0)->dirty = true;
2074
2075 /* Read DSCR */
2076 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2077 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2078 if (retval != ERROR_OK)
2079 return retval;
2080
2081 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2082
2083 /* Set Normal access mode */
2084 dscr &= ~DSCR_MA;
2085 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2086 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2087 if (retval != ERROR_OK)
2088 return retval;
2089
2090 if (arm->core_state == ARM_STATE_AARCH64) {
2091 /* Write X0 with value 'address' using write procedure */
2092 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2093 /* Step 1.c - Copy value from DTR to X0 using instruction mrs x0, DBGDTR_EL0 */
2094 retval = dpm->instr_write_data_dcc_64(dpm,
2095 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
2096 } else {
2097 /* Write R0 with value 'address' using write procedure */
2098 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2099 /* Step 1.c - Copy value from DTR to R0 using instruction mrc p14, 0, r0, c0, c5, 0 */
2100 retval = dpm->instr_write_data_dcc(dpm,
2101 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
2102 }
2103
2104 if (retval != ERROR_OK)
2105 return retval;
2106
2107 if (size == 4 && (address % 4) == 0)
2108 retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
2109 else
2110 retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2111
2112 if (dscr & DSCR_MA) {
2113 dscr &= ~DSCR_MA;
2114 mem_ap_write_atomic_u32(armv8->debug_ap,
2115 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2116 }
2117
2118 if (retval != ERROR_OK)
2119 return retval;
2120
2121 /* Check for sticky abort flags in the DSCR */
2122 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2123 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2124 if (retval != ERROR_OK)
2125 return retval;
2126
2127 dpm->dscr = dscr;
2128
2129 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2130 /* Abort occurred - clear it and exit */
2131 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2132 armv8_dpm_handle_exception(dpm, true);
2133 return ERROR_FAIL;
2134 }
2135
2136 /* Done */
2137 return ERROR_OK;
2138 }
2139
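/* The {read,write}_phys_memory and {read,write}_memory handlers below all
 * funnel into aarch64_{read,write}_cpu_memory(); they differ only in how
 * address translation is arranged around the transfer: physical accesses
 * force the MMU off, virtual accesses re-enable it if the core had it on
 * when it halted. */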
2140 static int aarch64_read_phys_memory(struct target *target,
2141 target_addr_t address, uint32_t size,
2142 uint32_t count, uint8_t *buffer)
2143 {
2144 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2145
2146 if (count && buffer) {
2147 /* read memory through APB-AP */
2148 retval = aarch64_mmu_modify(target, 0);
2149 if (retval != ERROR_OK)
2150 return retval;
2151 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2152 }
2153 return retval;
2154 }
2155
2156 static int aarch64_read_memory(struct target *target, target_addr_t address,
2157 uint32_t size, uint32_t count, uint8_t *buffer)
2158 {
2159 int mmu_enabled = 0;
2160 int retval;
2161
2162 /* determine if MMU was enabled on target stop */
2163 retval = aarch64_mmu(target, &mmu_enabled);
2164 if (retval != ERROR_OK)
2165 return retval;
2166
2167 if (mmu_enabled) {
2168 /* enable MMU as we could have disabled it for phys access */
2169 retval = aarch64_mmu_modify(target, 1);
2170 if (retval != ERROR_OK)
2171 return retval;
2172 }
2173 return aarch64_read_cpu_memory(target, address, size, count, buffer);
2174 }
2175
2176 static int aarch64_write_phys_memory(struct target *target,
2177 target_addr_t address, uint32_t size,
2178 uint32_t count, const uint8_t *buffer)
2179 {
2180 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2181
2182 if (count && buffer) {
2183 /* write memory through APB-AP */
2184 retval = aarch64_mmu_modify(target, 0);
2185 if (retval != ERROR_OK)
2186 return retval;
2187 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2188 }
2189
2190 return retval;
2191 }
2192
2193 static int aarch64_write_memory(struct target *target, target_addr_t address,
2194 uint32_t size, uint32_t count, const uint8_t *buffer)
2195 {
2196 int mmu_enabled = 0;
2197 int retval;
2198
2199 /* determine if MMU was enabled on target stop */
2200 retval = aarch64_mmu(target, &mmu_enabled);
2201 if (retval != ERROR_OK)
2202 return retval;
2203
2204 if (mmu_enabled) {
2205 /* enable MMU as we could have disabled it for phys access */
2206 retval = aarch64_mmu_modify(target, 1);
2207 if (retval != ERROR_OK)
2208 return retval;
2209 }
2210 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2211 }
2212
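/* Periodic timer callback (registered in aarch64_init_arch_info): while the
 * target is running, drain debug messages that target firmware (e.g. a
 * libdcc-style client) pushes through DBGDTRTX. */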
2213 static int aarch64_handle_target_request(void *priv)
2214 {
2215 struct target *target = priv;
2216 struct armv8_common *armv8 = target_to_armv8(target);
2217 int retval;
2218
2219 if (!target_was_examined(target))
2220 return ERROR_OK;
2221 if (!target->dbg_msg_enabled)
2222 return ERROR_OK;
2223
2224 if (target->state == TARGET_RUNNING) {
2225 uint32_t request;
2226 uint32_t dscr;
2227 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2228 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2229
2230 /* check if we have data */
2231 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2232 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2233 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2234 if (retval == ERROR_OK) {
2235 target_request(target, request);
2236 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2237 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2238 }
2239 }
2240 }
2241
2242 return ERROR_OK;
2243 }
2244
2245 static int aarch64_examine_first(struct target *target)
2246 {
2247 struct aarch64_common *aarch64 = target_to_aarch64(target);
2248 struct armv8_common *armv8 = &aarch64->armv8_common;
2249 struct adiv5_dap *swjdp = armv8->arm.dap;
2250 struct aarch64_private_config *pc;
2251 int i;
2252 int retval = ERROR_OK;
2253 uint64_t debug, ttypr;
2254 uint32_t cpuid;
2255 uint32_t tmp0, tmp1, tmp2, tmp3;
2256 debug = ttypr = cpuid = 0;
2257
2258 /* Search for the APB-AP - it is needed for access to debug registers */
2259 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2260 if (retval != ERROR_OK) {
2261 LOG_ERROR("Could not find APB-AP for debug access");
2262 return retval;
2263 }
2264
2265 retval = mem_ap_init(armv8->debug_ap);
2266 if (retval != ERROR_OK) {
2267 LOG_ERROR("Could not initialize the APB-AP");
2268 return retval;
2269 }
2270
2271 armv8->debug_ap->memaccess_tck = 10;
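/* Extra idle TCK cycles per MEM-AP access give the slower APB debug bus
 * time to answer without WAIT responses; the value can be tuned at
 * runtime via the DAP's 'memaccess' command. */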
2272
2273 if (!target->dbgbase_set) {
2274 uint32_t dbgbase;
2275 /* Get ROM Table base */
2276 uint32_t apid;
2277 int32_t coreidx = target->coreid;
2278 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2279 if (retval != ERROR_OK)
2280 return retval;
2281 /* Lookup 0x15 -- Processor DAP */
2282 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2283 &armv8->debug_base, &coreidx);
2284 if (retval != ERROR_OK)
2285 return retval;
2286 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
2287 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2288 } else
2289 armv8->debug_base = target->dbgbase;
2290
2291 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2292 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2293 if (retval != ERROR_OK) {
2294 LOG_DEBUG("Examine %s failed", "oslock");
2295 return retval;
2296 }
2297
2298 retval = mem_ap_read_u32(armv8->debug_ap,
2299 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2300 if (retval != ERROR_OK) {
2301 LOG_DEBUG("Examine %s failed", "CPUID");
2302 return retval;
2303 }
2304
2305 retval = mem_ap_read_u32(armv8->debug_ap,
2306 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2307 retval += mem_ap_read_u32(armv8->debug_ap,
2308 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2309 if (retval != ERROR_OK) {
2310 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2311 return retval;
2312 }
2313 retval = mem_ap_read_u32(armv8->debug_ap,
2314 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2315 retval += mem_ap_read_u32(armv8->debug_ap,
2316 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2317 if (retval != ERROR_OK) {
2318 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2319 return retval;
2320 }
2321
2322 retval = dap_run(armv8->debug_ap->dap);
2323 if (retval != ERROR_OK) {
2324 LOG_ERROR("%s: examination failed", target_name(target));
2325 return retval;
2326 }
2327
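/* Assemble the 64-bit ID values from the 32-bit halves read above:
 * ttypr = memory model feature register (tmp1:tmp0),
 * debug = ID_AA64DFR0_EL1 (tmp3:tmp2). */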
2328 ttypr |= tmp1;
2329 ttypr = (ttypr << 32) | tmp0;
2330 debug |= tmp3;
2331 debug = (debug << 32) | tmp2;
2332
2333 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2334 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2335 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2336
2337 if (target->private_config == NULL)
2338 return ERROR_FAIL;
2339
2340 pc = (struct aarch64_private_config *)target->private_config;
2341 if (pc->cti == NULL)
2342 return ERROR_FAIL;
2343
2344 armv8->cti = pc->cti;
2345
2346 retval = aarch64_dpm_setup(aarch64, debug);
2347 if (retval != ERROR_OK)
2348 return retval;
2349
2350 /* Setup Breakpoint Register Pairs */
2351 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2352 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2353 aarch64->brp_num_available = aarch64->brp_num;
2354 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2355 for (i = 0; i < aarch64->brp_num; i++) {
2356 aarch64->brp_list[i].used = 0;
2357 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2358 aarch64->brp_list[i].type = BRP_NORMAL;
2359 else
2360 aarch64->brp_list[i].type = BRP_CONTEXT;
2361 aarch64->brp_list[i].value = 0;
2362 aarch64->brp_list[i].control = 0;
2363 aarch64->brp_list[i].BRPn = i;
2364 }
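/* Example decode, assuming ID_AA64DFR0_EL1 = 0x10305106 (a value typical
 * of Cortex-A53): brp_num = ((debug >> 12) & 0xf) + 1 = 6 and
 * brp_num_context = ((debug >> 28) & 0xf) + 1 = 2, so entries 0..3 are
 * BRP_NORMAL and entries 4..5 are BRP_CONTEXT. */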
2365
2366 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2367
2368 target->state = TARGET_UNKNOWN;
2369 target->debug_reason = DBG_REASON_NOTHALTED;
2370 aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2371 target_set_examined(target);
2372 return ERROR_OK;
2373 }
2374
2375 static int aarch64_examine(struct target *target)
2376 {
2377 int retval = ERROR_OK;
2378
2379 /* don't re-probe hardware after each reset */
2380 if (!target_was_examined(target))
2381 retval = aarch64_examine_first(target);
2382
2383 /* Configure core debug access */
2384 if (retval == ERROR_OK)
2385 retval = aarch64_init_debug_access(target);
2386
2387 return retval;
2388 }
2389
2390 /*
2391 * AArch64 target creation and initialization
2392 */
2393
2394 static int aarch64_init_target(struct command_context *cmd_ctx,
2395 struct target *target)
2396 {
2397 /* examine_first() does a bunch of this */
2398 arm_semihosting_init(target);
2399 return ERROR_OK;
2400 }
2401
2402 static int aarch64_init_arch_info(struct target *target,
2403 struct aarch64_common *aarch64, struct adiv5_dap *dap)
2404 {
2405 struct armv8_common *armv8 = &aarch64->armv8_common;
2406
2407 /* Setup struct aarch64_common */
2408 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2409 armv8->arm.dap = dap;
2410
2411 /* register arch-specific functions */
2412 armv8->examine_debug_reason = NULL;
2413 armv8->post_debug_entry = aarch64_post_debug_entry;
2414 armv8->pre_restore_context = NULL;
2415 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2416
2417 armv8_init_arch_info(target, armv8);
2418 target_register_timer_callback(aarch64_handle_target_request, 1,
2419 TARGET_TIMER_TYPE_PERIODIC, target);
2420
2421 return ERROR_OK;
2422 }
2423
2424 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2425 {
2426 struct aarch64_private_config *pc = target->private_config;
2427 struct aarch64_common *aarch64;
2428
2429 if (pc == NULL || adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2430 return ERROR_FAIL;
2431
2432 aarch64 = calloc(1, sizeof(struct aarch64_common));
2433 if (aarch64 == NULL) {
2434 LOG_ERROR("Out of memory");
2435 return ERROR_FAIL;
2436 }
2437
2438 return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2439 }
2440
2441 static void aarch64_deinit_target(struct target *target)
2442 {
2443 struct aarch64_common *aarch64 = target_to_aarch64(target);
2444 struct armv8_common *armv8 = &aarch64->armv8_common;
2445 struct arm_dpm *dpm = &armv8->dpm;
2446
2447 armv8_free_reg_cache(target);
2448 free(aarch64->brp_list);
2449 free(dpm->dbp);
2450 free(dpm->dwp);
2451 free(target->private_config);
2452 free(aarch64);
2453 }
2454
2455 static int aarch64_mmu(struct target *target, int *enabled)
2456 {
2457 if (target->state != TARGET_HALTED) {
2458 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2459 return ERROR_TARGET_INVALID;
2460 }
2461
2462 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2463 return ERROR_OK;
2464 }
2465
2466 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2467 target_addr_t *phys)
2468 {
2469 return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2470 }
2471
2472 /*
2473 * private target configuration items
2474 */
2475 enum aarch64_cfg_param {
2476 CFG_CTI,
2477 };
2478
2479 static const Jim_Nvp nvp_config_opts[] = {
2480 { .name = "-cti", .value = CFG_CTI },
2481 { .name = NULL, .value = -1 }
2482 };
2483
2484 static int aarch64_jim_configure(struct target *target, Jim_GetOptInfo *goi)
2485 {
2486 struct aarch64_private_config *pc;
2487 Jim_Nvp *n;
2488 int e;
2489
2490 pc = (struct aarch64_private_config *)target->private_config;
2491 if (pc == NULL) {
2492 pc = calloc(1, sizeof(struct aarch64_private_config));
2493 target->private_config = pc;
2494 }
2495
2496 /*
2497 * Call adiv5_jim_configure() to parse the common DAP options
2498 * It will return JIM_CONTINUE if it didn't find any known
2499 * options, JIM_OK if it correctly parsed the topmost option
2500 * and JIM_ERR if an error occurred during parameter evaluation.
2501 * For JIM_CONTINUE, we check our own params.
2502 */
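/*
 * A typical configuration sketch from a board script (all names, and the
 * -ctibase option spelling, are illustrative):
 *
 *   cti create $_CHIPNAME.cti0 -dap $_CHIPNAME.dap -ap-num 0 \
 *           -ctibase 0x80220000
 *   target create $_CHIPNAME.core0 aarch64 -dap $_CHIPNAME.dap \
 *           -cti $_CHIPNAME.cti0
 *
 * adiv5_jim_configure() consumes -dap; -cti is handled below.
 */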
2503 e = adiv5_jim_configure(target, goi);
2504 if (e != JIM_CONTINUE)
2505 return e;
2506
2507 /* parse config or cget options ... */
2508 if (goi->argc > 0) {
2509 Jim_SetEmptyResult(goi->interp);
2510
2511 /* check first if topmost item is for us */
2512 e = Jim_Nvp_name2value_obj(goi->interp, nvp_config_opts,
2513 goi->argv[0], &n);
2514 if (e != JIM_OK)
2515 return JIM_CONTINUE;
2516
2517 e = Jim_GetOpt_Obj(goi, NULL);
2518 if (e != JIM_OK)
2519 return e;
2520
2521 switch (n->value) {
2522 case CFG_CTI: {
2523 if (goi->isconfigure) {
2524 Jim_Obj *o_cti;
2525 struct arm_cti *cti;
2526 e = Jim_GetOpt_Obj(goi, &o_cti);
2527 if (e != JIM_OK)
2528 return e;
2529 cti = cti_instance_by_jim_obj(goi->interp, o_cti);
2530 if (cti == NULL) {
2531 Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
2532 return JIM_ERR;
2533 }
2534 pc->cti = cti;
2535 } else {
2536 if (goi->argc != 0) {
2537 Jim_WrongNumArgs(goi->interp,
2538 goi->argc, goi->argv,
2539 "NO PARAMS");
2540 return JIM_ERR;
2541 }
2542
2543 if (pc == NULL || pc->cti == NULL) {
2544 Jim_SetResultString(goi->interp, "CTI not configured", -1);
2545 return JIM_ERR;
2546 }
2547 Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
2548 }
2549 break;
2550 }
2551
2552 default:
2553 return JIM_CONTINUE;
2554 }
2555 }
2556
2557 return JIM_OK;
2558 }
2559
2560 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2561 {
2562 struct target *target = get_current_target(CMD_CTX);
2563 struct armv8_common *armv8 = target_to_armv8(target);
2564
2565 return armv8_handle_cache_info_command(CMD,
2566 &armv8->armv8_mmu.armv8_cache);
2567 }
2568
2569
2570 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2571 {
2572 struct target *target = get_current_target(CMD_CTX);
2573 if (!target_was_examined(target)) {
2574 LOG_ERROR("target not examined yet");
2575 return ERROR_FAIL;
2576 }
2577
2578 return aarch64_init_debug_access(target);
2579 }
2580
2581 COMMAND_HANDLER(aarch64_mask_interrupts_command)
2582 {
2583 struct target *target = get_current_target(CMD_CTX);
2584 struct aarch64_common *aarch64 = target_to_aarch64(target);
2585
2586 static const Jim_Nvp nvp_maskisr_modes[] = {
2587 { .name = "off", .value = AARCH64_ISRMASK_OFF },
2588 { .name = "on", .value = AARCH64_ISRMASK_ON },
2589 { .name = NULL, .value = -1 },
2590 };
2591 const Jim_Nvp *n;
2592
2593 if (CMD_ARGC > 0) {
2594 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2595 if (n->name == NULL) {
2596 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2597 return ERROR_COMMAND_SYNTAX_ERROR;
2598 }
2599
2600 aarch64->isrmasking_mode = n->value;
2601 }
2602
2603 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
2604 command_print(CMD, "aarch64 interrupt mask %s", n->name);
2605
2606 return ERROR_OK;
2607 }
2608
2609 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
2610 {
2611 struct command_context *context;
2612 struct target *target;
2613 struct arm *arm;
2614 int retval;
2615 bool is_mcr = false;
2616 int arg_cnt = 0;
2617
2618 if (Jim_CompareStringImmediate(interp, argv[0], "mcr")) {
2619 is_mcr = true;
2620 arg_cnt = 7;
2621 } else {
2622 arg_cnt = 6;
2623 }
2624
2625 context = current_command_context(interp);
2626 assert(context != NULL);
2627
2628 target = get_current_target(context);
2629 if (target == NULL) {
2630 LOG_ERROR("%s: no current target", __func__);
2631 return JIM_ERR;
2632 }
2633 if (!target_was_examined(target)) {
2634 LOG_ERROR("%s: not yet examined", target_name(target));
2635 return JIM_ERR;
2636 }
2637
2638 arm = target_to_arm(target);
2639 if (!is_arm(arm)) {
2640 LOG_ERROR("%s: not an ARM", target_name(target));
2641 return JIM_ERR;
2642 }
2643
2644 if (target->state != TARGET_HALTED)
2645 return JIM_ERR;	/* a Jim handler must return a Jim status, not an OpenOCD error code */
2646
2647 if (arm->core_state == ARM_STATE_AARCH64) {
2648 LOG_ERROR("%s: not a 32-bit ARM target", target_name(target));
2649 return JIM_ERR;
2650 }
2651
2652 if (argc != arg_cnt) {
2653 LOG_ERROR("%s: wrong number of arguments", __func__);
2654 return JIM_ERR;
2655 }
2656
2657 int cpnum;
2658 uint32_t op1;
2659 uint32_t op2;
2660 uint32_t CRn;
2661 uint32_t CRm;
2662 uint32_t value;
2663 long l;
2664
2665 /* NOTE: parameter sequence matches ARM instruction set usage:
2666 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
2667 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
2668 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
2669 */
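/* Example session (AArch32 state only; target name is illustrative),
 * reading MIDR, i.e. MRC p15, 0, <Rt>, c0, c0, 0:
 *
 *   > mycpu mrc 15 0 0 0 0
 *
 * and writing a CP15 register with 'mcr cpnum op1 CRn CRm op2 value'. */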
2670 retval = Jim_GetLong(interp, argv[1], &l);
2671 if (retval != JIM_OK)
2672 return retval;
2673 if (l & ~0xf) {
2674 LOG_ERROR("%s: %s %d out of range", __func__,
2675 "coprocessor", (int) l);
2676 return JIM_ERR;
2677 }
2678 cpnum = l;
2679
2680 retval = Jim_GetLong(interp, argv[2], &l);
2681 if (retval != JIM_OK)
2682 return retval;
2683 if (l & ~0x7) {
2684 LOG_ERROR("%s: %s %d out of range", __func__,
2685 "op1", (int) l);
2686 return JIM_ERR;
2687 }
2688 op1 = l;
2689
2690 retval = Jim_GetLong(interp, argv[3], &l);
2691 if (retval != JIM_OK)
2692 return retval;
2693 if (l & ~0xf) {
2694 LOG_ERROR("%s: %s %d out of range", __func__,
2695 "CRn", (int) l);
2696 return JIM_ERR;
2697 }
2698 CRn = l;
2699
2700 retval = Jim_GetLong(interp, argv[4], &l);
2701 if (retval != JIM_OK)
2702 return retval;
2703 if (l & ~0xf) {
2704 LOG_ERROR("%s: %s %d out of range", __func__,
2705 "CRm", (int) l);
2706 return JIM_ERR;
2707 }
2708 CRm = l;
2709
2710 retval = Jim_GetLong(interp, argv[5], &l);
2711 if (retval != JIM_OK)
2712 return retval;
2713 if (l & ~0x7) {
2714 LOG_ERROR("%s: %s %d out of range", __func__,
2715 "op2", (int) l);
2716 return JIM_ERR;
2717 }
2718 op2 = l;
2719
2720 value = 0;
2721
2722 if (is_mcr == true) {
2723 retval = Jim_GetLong(interp, argv[6], &l);
2724 if (retval != JIM_OK)
2725 return retval;
2726 value = l;
2727
2728 /* NOTE: parameters reordered! */
2729 /* ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2) */
2730 retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
2731 if (retval != ERROR_OK)
2732 return JIM_ERR;
2733 } else {
2734 /* NOTE: parameters reordered! */
2735 /* ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2) */
2736 retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
2737 if (retval != ERROR_OK)
2738 return JIM_ERR;
2739
2740 Jim_SetResult(interp, Jim_NewIntObj(interp, value));
2741 }
2742
2743 return JIM_OK;
2744 }
2745
2746 static const struct command_registration aarch64_exec_command_handlers[] = {
2747 {
2748 .name = "cache_info",
2749 .handler = aarch64_handle_cache_info_command,
2750 .mode = COMMAND_EXEC,
2751 .help = "display information about target caches",
2752 .usage = "",
2753 },
2754 {
2755 .name = "dbginit",
2756 .handler = aarch64_handle_dbginit_command,
2757 .mode = COMMAND_EXEC,
2758 .help = "Initialize core debug",
2759 .usage = "",
2760 },
2761 {
2762 .name = "maskisr",
2763 .handler = aarch64_mask_interrupts_command,
2764 .mode = COMMAND_ANY,
2765 .help = "mask aarch64 interrupts during single-step",
2766 .usage = "['on'|'off']",
2767 },
2768 {
2769 .name = "mcr",
2770 .mode = COMMAND_EXEC,
2771 .jim_handler = jim_mcrmrc,
2772 .help = "write coprocessor register",
2773 .usage = "cpnum op1 CRn CRm op2 value",
2774 },
2775 {
2776 .name = "mrc",
2777 .mode = COMMAND_EXEC,
2778 .jim_handler = jim_mcrmrc,
2779 .help = "read coprocessor register",
2780 .usage = "cpnum op1 CRn CRm op2",
2781 },
2782 {
2783 .chain = smp_command_handlers,
2784 },
2787 COMMAND_REGISTRATION_DONE
2788 };
2789
2790 extern const struct command_registration semihosting_common_handlers[];
2791
2792 static const struct command_registration aarch64_command_handlers[] = {
2793 {
2794 .name = "arm",
2795 .mode = COMMAND_ANY,
2796 .help = "ARM Command Group",
2797 .usage = "",
2798 .chain = semihosting_common_handlers
2799 },
2800 {
2801 .chain = armv8_command_handlers,
2802 },
2803 {
2804 .name = "aarch64",
2805 .mode = COMMAND_ANY,
2806 .help = "AArch64 command group",
2807 .usage = "",
2808 .chain = aarch64_exec_command_handlers,
2809 },
2810 COMMAND_REGISTRATION_DONE
2811 };
2812
2813 struct target_type aarch64_target = {
2814 .name = "aarch64",
2815
2816 .poll = aarch64_poll,
2817 .arch_state = armv8_arch_state,
2818
2819 .halt = aarch64_halt,
2820 .resume = aarch64_resume,
2821 .step = aarch64_step,
2822
2823 .assert_reset = aarch64_assert_reset,
2824 .deassert_reset = aarch64_deassert_reset,
2825
2826 /* REVISIT allow exporting VFP3 registers ... */
2827 .get_gdb_arch = armv8_get_gdb_arch,
2828 .get_gdb_reg_list = armv8_get_gdb_reg_list,
2829
2830 .read_memory = aarch64_read_memory,
2831 .write_memory = aarch64_write_memory,
2832
2833 .add_breakpoint = aarch64_add_breakpoint,
2834 .add_context_breakpoint = aarch64_add_context_breakpoint,
2835 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2836 .remove_breakpoint = aarch64_remove_breakpoint,
2837 .add_watchpoint = NULL,
2838 .remove_watchpoint = NULL,
2839
2840 .commands = aarch64_command_handlers,
2841 .target_create = aarch64_target_create,
2842 .target_jim_configure = aarch64_jim_configure,
2843 .init_target = aarch64_init_target,
2844 .deinit_target = aarch64_deinit_target,
2845 .examine = aarch64_examine,
2846
2847 .read_phys_memory = aarch64_read_phys_memory,
2848 .write_phys_memory = aarch64_write_phys_memory,
2849 .mmu = aarch64_mmu,
2850 .virt2phys = aarch64_virt2phys,
2851 };
