target/aarch64: fix use of 'target->private_config'
src/target/aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "a64_disassembler.h"
27 #include "register.h"
28 #include "target_request.h"
29 #include "target_type.h"
30 #include "armv8_opcodes.h"
31 #include "armv8_cache.h"
32 #include "arm_semihosting.h"
33 #include "jtag/interface.h"
34 #include "smp.h"
35 #include <helper/time_support.h>
36
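/*
 * LAZY: only issue the halt/restart request; SYNC: additionally wait
 * until PRSR confirms that the PE actually changed state.
 */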
37 enum restart_mode {
38 RESTART_LAZY,
39 RESTART_SYNC,
40 };
41
42 enum halt_mode {
43 HALT_LAZY,
44 HALT_SYNC,
45 };
46
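/*
 * Private target configuration. 'adiv5_config' must stay the first
 * member, so that generic ADIv5 code can interpret
 * 'target->private_config' as a plain 'struct adiv5_private_config'.
 */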
47 struct aarch64_private_config {
48 struct adiv5_private_config adiv5_config;
49 struct arm_cti *cti;
50 };
51
52 static int aarch64_poll(struct target *target);
53 static int aarch64_debug_entry(struct target *target);
54 static int aarch64_restore_context(struct target *target, bool bpwp);
55 static int aarch64_set_breakpoint(struct target *target,
56 struct breakpoint *breakpoint, uint8_t matchmode);
57 static int aarch64_set_context_breakpoint(struct target *target,
58 struct breakpoint *breakpoint, uint8_t matchmode);
59 static int aarch64_set_hybrid_breakpoint(struct target *target,
60 struct breakpoint *breakpoint);
61 static int aarch64_unset_breakpoint(struct target *target,
62 struct breakpoint *breakpoint);
63 static int aarch64_mmu(struct target *target, int *enabled);
64 static int aarch64_virt2phys(struct target *target,
65 target_addr_t virt, target_addr_t *phys);
66 static int aarch64_read_cpu_memory(struct target *target,
67 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
68
69 static int aarch64_restore_system_control_reg(struct target *target)
70 {
71 enum arm_mode target_mode = ARM_MODE_ANY;
72 int retval = ERROR_OK;
73 uint32_t instr;
74
75 struct aarch64_common *aarch64 = target_to_aarch64(target);
76 struct armv8_common *armv8 = target_to_armv8(target);
77
78 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
79 aarch64->system_control_reg_curr = aarch64->system_control_reg;
80 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
81
82 switch (armv8->arm.core_mode) {
83 case ARMV8_64_EL0T:
84 target_mode = ARMV8_64_EL1H;
85 /* fall through */
86 case ARMV8_64_EL1T:
87 case ARMV8_64_EL1H:
88 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
89 break;
90 case ARMV8_64_EL2T:
91 case ARMV8_64_EL2H:
92 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
93 break;
94 case ARMV8_64_EL3H:
95 case ARMV8_64_EL3T:
96 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
97 break;
98
99 case ARM_MODE_SVC:
100 case ARM_MODE_ABT:
101 case ARM_MODE_FIQ:
102 case ARM_MODE_IRQ:
103 case ARM_MODE_HYP:
104 case ARM_MODE_SYS:
105 instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
106 break;
107
108 default:
109 LOG_ERROR("cannot restore system control register in this mode: (%s : 0x%x)",
110 armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
111 return ERROR_FAIL;
112 }
113
114 if (target_mode != ARM_MODE_ANY)
115 armv8_dpm_modeswitch(&armv8->dpm, target_mode);
116
117 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
118 if (retval != ERROR_OK)
119 return retval;
120
121 if (target_mode != ARM_MODE_ANY)
122 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
123 }
124
125 return retval;
126 }
127
128 /* modify system_control_reg in order to enable or disable the MMU for:
129 * - virt2phys address conversion
130 * - reading or writing memory at physical or virtual addresses */
131 static int aarch64_mmu_modify(struct target *target, int enable)
132 {
133 struct aarch64_common *aarch64 = target_to_aarch64(target);
134 struct armv8_common *armv8 = &aarch64->armv8_common;
135 int retval = ERROR_OK;
136 uint32_t instr = 0;
137
138 if (enable) {
139 /* the MMU can only be re-enabled if it was enabled when the target stopped */
140 if (!(aarch64->system_control_reg & 0x1U)) {
141 LOG_ERROR("cannot enable the MMU: the target was stopped with the MMU disabled");
142 return ERROR_FAIL;
143 }
144 if (!(aarch64->system_control_reg_curr & 0x1U))
145 aarch64->system_control_reg_curr |= 0x1U;
146 } else {
147 if (aarch64->system_control_reg_curr & 0x4U) {
148 /* data cache is active */
149 aarch64->system_control_reg_curr &= ~0x4U;
150 /* flush data cache armv8 function to be called */
151 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
152 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
153 }
154 if ((aarch64->system_control_reg_curr & 0x1U)) {
155 aarch64->system_control_reg_curr &= ~0x1U;
156 }
157 }
158
159 switch (armv8->arm.core_mode) {
160 case ARMV8_64_EL0T:
161 case ARMV8_64_EL1T:
162 case ARMV8_64_EL1H:
163 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
164 break;
165 case ARMV8_64_EL2T:
166 case ARMV8_64_EL2H:
167 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
168 break;
169 case ARMV8_64_EL3H:
170 case ARMV8_64_EL3T:
171 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
172 break;
173
174 case ARM_MODE_SVC:
175 case ARM_MODE_ABT:
176 case ARM_MODE_FIQ:
177 case ARM_MODE_IRQ:
178 case ARM_MODE_HYP:
179 case ARM_MODE_SYS:
180 instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
181 break;
182
183 default:
184 LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_mode);
185 break;
186 }
187
188 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
189 aarch64->system_control_reg_curr);
190 return retval;
191 }
192
193 /*
194 * Basic debug access; very low level, assumes state is saved
195 */
196 static int aarch64_init_debug_access(struct target *target)
197 {
198 struct armv8_common *armv8 = target_to_armv8(target);
199 int retval;
200 uint32_t dummy;
201
202 LOG_DEBUG("%s", target_name(target));
203
204 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
205 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
206 if (retval != ERROR_OK) {
207 LOG_DEBUG("Examine %s failed", "oslock");
208 return retval;
209 }
210
211 /* Clear Sticky Power Down status Bit in PRSR to enable access to
212 the registers in the Core Power Domain */
213 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
214 armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
215 if (retval != ERROR_OK)
216 return retval;
217
218 /*
219 * Static CTI configuration:
220 * Channel 0 -> trigger outputs HALT request to PE
221 * Channel 1 -> trigger outputs Resume request to PE
222 * Gate all channel trigger events from entering the CTM
223 */
224
225 /* Enable CTI */
226 retval = arm_cti_enable(armv8->cti, true);
227 /* By default, gate all channel events to and from the CTM */
228 if (retval == ERROR_OK)
229 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
230 /* output halt requests to PE on channel 0 event */
231 if (retval == ERROR_OK)
232 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
233 /* output restart requests to PE on channel 1 event */
234 if (retval == ERROR_OK)
235 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
236 if (retval != ERROR_OK)
237 return retval;
238
239 /* Resync breakpoint registers */
240
241 return ERROR_OK;
242 }
243
244 /* Write to memory mapped registers directly with no cache or mmu handling */
245 static int aarch64_dap_write_memap_register_u32(struct target *target,
246 uint32_t address,
247 uint32_t value)
248 {
249 int retval;
250 struct armv8_common *armv8 = target_to_armv8(target);
251
252 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
253
254 return retval;
255 }
256
257 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
258 {
259 struct arm_dpm *dpm = &a8->armv8_common.dpm;
260 int retval;
261
262 dpm->arm = &a8->armv8_common.arm;
263 dpm->didr = debug;
264
265 retval = armv8_dpm_setup(dpm);
266 if (retval == ERROR_OK)
267 retval = armv8_dpm_initialize(dpm);
268
269 return retval;
270 }
271
272 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
273 {
274 struct armv8_common *armv8 = target_to_armv8(target);
275 return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
276 }
277
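/*
 * Read PRSR and report in '*p_result' whether (PRSR & mask) == (val & mask);
 * the raw PRSR value is optionally passed back through '*p_prsr'.
 */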
278 static int aarch64_check_state_one(struct target *target,
279 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
280 {
281 struct armv8_common *armv8 = target_to_armv8(target);
282 uint32_t prsr;
283 int retval;
284
285 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
286 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
287 if (retval != ERROR_OK)
288 return retval;
289
290 if (p_prsr)
291 *p_prsr = prsr;
292
293 if (p_result)
294 *p_result = (prsr & mask) == (val & mask);
295
296 return ERROR_OK;
297 }
298
299 static int aarch64_wait_halt_one(struct target *target)
300 {
301 int retval = ERROR_OK;
302 uint32_t prsr;
303
304 int64_t then = timeval_ms();
305 for (;;) {
306 int halted;
307
308 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
309 if (retval != ERROR_OK || halted)
310 break;
311
312 if (timeval_ms() > then + 1000) {
313 retval = ERROR_TARGET_TIMEOUT;
314 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
315 break;
316 }
317 }
318 return retval;
319 }
320
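/*
 * Prepare all running PEs of the SMP group for halting: ungate CTI
 * channel 0 and enable halting debug mode on each of them. '*p_first'
 * receives the PE that should get the actual halt request.
 */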
321 static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
322 {
323 int retval = ERROR_OK;
324 struct target_list *head = target->head;
325 struct target *first = NULL;
326
327 LOG_DEBUG("target %s exc %i", target_name(target), exc_target);
328
329 while (head != NULL) {
330 struct target *curr = head->target;
331 struct armv8_common *armv8 = target_to_armv8(curr);
332 head = head->next;
333
334 if (exc_target && curr == target)
335 continue;
336 if (!target_was_examined(curr))
337 continue;
338 if (curr->state != TARGET_RUNNING)
339 continue;
340
341 /* HACK: mark this target as prepared for halting */
342 curr->debug_reason = DBG_REASON_DBGRQ;
343
344 /* open the gate for channel 0 to let HALT requests pass to the CTM */
345 retval = arm_cti_ungate_channel(armv8->cti, 0);
346 if (retval == ERROR_OK)
347 retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
348 if (retval != ERROR_OK)
349 break;
350
351 LOG_DEBUG("target %s prepared", target_name(curr));
352
353 if (first == NULL)
354 first = curr;
355 }
356
357 if (p_first) {
358 if (exc_target && first)
359 *p_first = first;
360 else
361 *p_first = target;
362 }
363
364 return retval;
365 }
366
367 static int aarch64_halt_one(struct target *target, enum halt_mode mode)
368 {
369 int retval = ERROR_OK;
370 struct armv8_common *armv8 = target_to_armv8(target);
371
372 LOG_DEBUG("%s", target_name(target));
373
374 /* allow Halting Debug Mode */
375 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
376 if (retval != ERROR_OK)
377 return retval;
378
379 /* trigger an event on channel 0, this outputs a halt request to the PE */
380 retval = arm_cti_pulse_channel(armv8->cti, 0);
381 if (retval != ERROR_OK)
382 return retval;
383
384 if (mode == HALT_SYNC) {
385 retval = aarch64_wait_halt_one(target);
386 if (retval != ERROR_OK) {
387 if (retval == ERROR_TARGET_TIMEOUT)
388 LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
389 return retval;
390 }
391 }
392
393 return ERROR_OK;
394 }
395
396 static int aarch64_halt_smp(struct target *target, bool exc_target)
397 {
398 struct target *next = target;
399 int retval;
400
401 /* prepare halt on all PEs of the group */
402 retval = aarch64_prepare_halt_smp(target, exc_target, &next);
403
404 if (exc_target && next == target)
405 return retval;
406
407 /* halt the target PE */
408 if (retval == ERROR_OK)
409 retval = aarch64_halt_one(next, HALT_LAZY);
410
411 if (retval != ERROR_OK)
412 return retval;
413
414 /* wait for all PEs to halt */
415 int64_t then = timeval_ms();
416 for (;;) {
417 bool all_halted = true;
418 struct target_list *head;
419 struct target *curr;
420
421 foreach_smp_target(head, target->head) {
422 int halted;
423
424 curr = head->target;
425
426 if (!target_was_examined(curr))
427 continue;
428
429 retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
430 if (retval != ERROR_OK || !halted) {
431 all_halted = false;
432 break;
433 }
434 }
435
436 if (all_halted)
437 break;
438
439 if (timeval_ms() > then + 1000) {
440 retval = ERROR_TARGET_TIMEOUT;
441 break;
442 }
443
444 /*
445 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
446 * and it looks like the CTIs are not connected by a common
447 * trigger matrix. It seems that we need to halt one core in each
448 * cluster explicitly. So if we find that a core has not halted
449 * yet, we trigger an explicit halt for the second cluster.
450 */
451 retval = aarch64_halt_one(curr, HALT_LAZY);
452 if (retval != ERROR_OK)
453 break;
454 }
455
456 return retval;
457 }
458
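/*
 * After one PE of an SMP group has halted, poll the remaining group
 * members so their states get updated; the target bound to the GDB
 * connection is polled last.
 */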
459 static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
460 {
461 struct target *gdb_target = NULL;
462 struct target_list *head;
463 struct target *curr;
464
465 if (debug_reason == DBG_REASON_NOTHALTED) {
466 LOG_DEBUG("Halting remaining targets in SMP group");
467 aarch64_halt_smp(target, true);
468 }
469
470 /* poll all targets in the group, but skip the target that serves GDB */
471 foreach_smp_target(head, target->head) {
472 curr = head->target;
473 /* skip calling context */
474 if (curr == target)
475 continue;
476 if (!target_was_examined(curr))
477 continue;
478 /* skip targets that were already halted */
479 if (curr->state == TARGET_HALTED)
480 continue;
481 /* remember the gdb_service->target */
482 if (curr->gdb_service != NULL)
483 gdb_target = curr->gdb_service->target;
484 /* skip it */
485 if (curr == gdb_target)
486 continue;
487
488 /* avoid recursion in aarch64_poll() */
489 curr->smp = 0;
490 aarch64_poll(curr);
491 curr->smp = 1;
492 }
493
494 /* after all targets were updated, poll the gdb serving target */
495 if (gdb_target != NULL && gdb_target != target)
496 aarch64_poll(gdb_target);
497
498 return ERROR_OK;
499 }
500
501 /*
502 * AArch64 run control
503 */
504
505 static int aarch64_poll(struct target *target)
506 {
507 enum target_state prev_target_state;
508 int retval = ERROR_OK;
509 int halted;
510
511 retval = aarch64_check_state_one(target,
512 PRSR_HALT, PRSR_HALT, &halted, NULL);
513 if (retval != ERROR_OK)
514 return retval;
515
516 if (halted) {
517 prev_target_state = target->state;
518 if (prev_target_state != TARGET_HALTED) {
519 enum target_debug_reason debug_reason = target->debug_reason;
520
521 /* We have a halting debug event */
522 target->state = TARGET_HALTED;
523 LOG_DEBUG("Target %s halted", target_name(target));
524 retval = aarch64_debug_entry(target);
525 if (retval != ERROR_OK)
526 return retval;
527
528 if (target->smp)
529 update_halt_gdb(target, debug_reason);
530
531 if (arm_semihosting(target, &retval) != 0)
532 return retval;
533
534 switch (prev_target_state) {
535 case TARGET_RUNNING:
536 case TARGET_UNKNOWN:
537 case TARGET_RESET:
538 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
539 break;
540 case TARGET_DEBUG_RUNNING:
541 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
542 break;
543 default:
544 break;
545 }
546 }
547 } else
548 target->state = TARGET_RUNNING;
549
550 return retval;
551 }
552
553 static int aarch64_halt(struct target *target)
554 {
555 struct armv8_common *armv8 = target_to_armv8(target);
556 armv8->last_run_control_op = ARMV8_RUNCONTROL_HALT;
557
558 if (target->smp)
559 return aarch64_halt_smp(target, false);
560
561 return aarch64_halt_one(target, HALT_SYNC);
562 }
563
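/*
 * Restore the register context of a single PE and set up the PC to
 * resume at; the resume PC actually used is returned in '*address'.
 */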
564 static int aarch64_restore_one(struct target *target, int current,
565 uint64_t *address, int handle_breakpoints, int debug_execution)
566 {
567 struct armv8_common *armv8 = target_to_armv8(target);
568 struct arm *arm = &armv8->arm;
569 int retval;
570 uint64_t resume_pc;
571
572 LOG_DEBUG("%s", target_name(target));
573
574 if (!debug_execution)
575 target_free_all_working_areas(target);
576
577 /* current = 1: continue on current pc, otherwise continue at <address> */
578 resume_pc = buf_get_u64(arm->pc->value, 0, 64);
579 if (!current)
580 resume_pc = *address;
581 else
582 *address = resume_pc;
583
584 /* Make sure that the ARMv7 gdb thumb fixup does not
585 * kill the return address
586 */
587 switch (arm->core_state) {
588 case ARM_STATE_ARM:
589 resume_pc &= 0xFFFFFFFC;
590 break;
591 case ARM_STATE_AARCH64:
592 resume_pc &= 0xFFFFFFFFFFFFFFFC;
593 break;
594 case ARM_STATE_THUMB:
595 case ARM_STATE_THUMB_EE:
596 /* When the return address is loaded into PC
597 * bit 0 must be 1 to stay in Thumb state
598 */
599 resume_pc |= 0x1;
600 break;
601 case ARM_STATE_JAZELLE:
602 LOG_ERROR("How do I resume into Jazelle state??");
603 return ERROR_FAIL;
604 }
605 LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
606 buf_set_u64(arm->pc->value, 0, 64, resume_pc);
607 arm->pc->dirty = true;
608 arm->pc->valid = true;
609
610 /* call this now, before restoring the context, because it uses CPU
611 * register r0 for restoring the system control register */
612 retval = aarch64_restore_system_control_reg(target);
613 if (retval == ERROR_OK)
614 retval = aarch64_restore_context(target, handle_breakpoints);
615
616 return retval;
617 }
618
619 /**
620 * Prepare a single target for restart: sanity-check DSCR, acknowledge
621 * any pending CTI halt event and set up the CTI gates so that only
622 * restart events reach this PE.
623 */
624 static int aarch64_prepare_restart_one(struct target *target)
625 {
626 struct armv8_common *armv8 = target_to_armv8(target);
627 int retval;
628 uint32_t dscr;
629 uint32_t tmp;
630
631 LOG_DEBUG("%s", target_name(target));
632
633 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
634 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
635 if (retval != ERROR_OK)
636 return retval;
637
638 if ((dscr & DSCR_ITE) == 0)
639 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
640 if ((dscr & DSCR_ERR) != 0)
641 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
642
643 /* acknowledge a pending CTI halt event */
644 retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
645 /*
646 * open the CTI gate for channel 1 so that the restart events
647 * get passed along to all PEs. Also close gate for channel 0
648 * to isolate the PE from halt events.
649 */
650 if (retval == ERROR_OK)
651 retval = arm_cti_ungate_channel(armv8->cti, 1);
652 if (retval == ERROR_OK)
653 retval = arm_cti_gate_channel(armv8->cti, 0);
654
655 /* make sure that DSCR.HDE is set */
656 if (retval == ERROR_OK) {
657 dscr |= DSCR_HDE;
658 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
659 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
660 }
661
662 if (retval == ERROR_OK) {
663 /* clear sticky bits in PRSR, SDR is now 0 */
664 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
665 armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
666 }
667
668 return retval;
669 }
670
671 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
672 {
673 struct armv8_common *armv8 = target_to_armv8(target);
674 int retval;
675
676 LOG_DEBUG("%s", target_name(target));
677
678 /* trigger an event on channel 1, generates a restart request to the PE */
679 retval = arm_cti_pulse_channel(armv8->cti, 1);
680 if (retval != ERROR_OK)
681 return retval;
682
683 if (mode == RESTART_SYNC) {
684 int64_t then = timeval_ms();
685 for (;;) {
686 int resumed;
687 /*
688 * if PRSR.SDR is set now, the target did restart, even
689 * if it's now already halted again (e.g. due to breakpoint)
690 */
691 retval = aarch64_check_state_one(target,
692 PRSR_SDR, PRSR_SDR, &resumed, NULL);
693 if (retval != ERROR_OK || resumed)
694 break;
695
696 if (timeval_ms() > then + 1000) {
697 LOG_ERROR("%s: Timeout waiting for resume", target_name(target));
698 retval = ERROR_TARGET_TIMEOUT;
699 break;
700 }
701 }
702 }
703
704 if (retval != ERROR_OK)
705 return retval;
706
707 target->debug_reason = DBG_REASON_NOTHALTED;
708 target->state = TARGET_RUNNING;
709
710 return ERROR_OK;
711 }
712
713 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
714 {
715 int retval;
716
717 LOG_DEBUG("%s", target_name(target));
718
719 retval = aarch64_prepare_restart_one(target);
720 if (retval == ERROR_OK)
721 retval = aarch64_do_restart_one(target, mode);
722
723 return retval;
724 }
725
726 /*
727 * prepare all but the current target for restart
728 */
729 static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
730 {
731 int retval = ERROR_OK;
732 struct target_list *head;
733 struct target *first = NULL;
734 uint64_t address;
735
736 foreach_smp_target(head, target->head) {
737 struct target *curr = head->target;
738
739 /* skip calling target */
740 if (curr == target)
741 continue;
742 if (!target_was_examined(curr))
743 continue;
744 if (curr->state != TARGET_HALTED)
745 continue;
746
747 /* resume at current address, not in step mode */
748 retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
749 if (retval == ERROR_OK)
750 retval = aarch64_prepare_restart_one(curr);
751 if (retval != ERROR_OK) {
752 LOG_ERROR("failed to restore target %s", target_name(curr));
753 break;
754 }
755 /* remember the first valid target in the group */
756 if (first == NULL)
757 first = curr;
758 }
759
760 if (p_first)
761 *p_first = first;
762
763 return retval;
764 }
765
766
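/*
 * Restart all other PEs of the SMP group and wait until they have
 * left the halted state.
 */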
767 static int aarch64_step_restart_smp(struct target *target)
768 {
769 int retval = ERROR_OK;
770 struct target_list *head;
771 struct target *first = NULL;
772
773 LOG_DEBUG("%s", target_name(target));
774
775 retval = aarch64_prep_restart_smp(target, 0, &first);
776 if (retval != ERROR_OK)
777 return retval;
778
779 if (first != NULL)
780 retval = aarch64_do_restart_one(first, RESTART_LAZY);
781 if (retval != ERROR_OK) {
782 LOG_DEBUG("error restarting target %s", target_name(first));
783 return retval;
784 }
785
786 int64_t then = timeval_ms();
787 for (;;) {
788 struct target *curr = target;
789 bool all_resumed = true;
790
791 foreach_smp_target(head, target->head) {
792 uint32_t prsr;
793 int resumed;
794
795 curr = head->target;
796
797 if (curr == target)
798 continue;
799
800 if (!target_was_examined(curr))
801 continue;
802
803 retval = aarch64_check_state_one(curr,
804 PRSR_SDR, PRSR_SDR, &resumed, &prsr);
805 if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
806 all_resumed = false;
807 break;
808 }
809
810 if (curr->state != TARGET_RUNNING) {
811 curr->state = TARGET_RUNNING;
812 curr->debug_reason = DBG_REASON_NOTHALTED;
813 target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
814 }
815 }
816
817 if (all_resumed)
818 break;
819
820 if (timeval_ms() > then + 1000) {
821 LOG_ERROR("%s: timeout waiting for target resume", __func__);
822 retval = ERROR_TARGET_TIMEOUT;
823 break;
824 }
825 /*
826 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
827 * and it looks like the CTIs are not connected by a common
828 * trigger matrix. It seems that we need to restart one core in each
829 * cluster explicitly. So if we find that a core has not resumed
830 * yet, we trigger an explicit resume for the second cluster.
831 */
832 retval = aarch64_do_restart_one(curr, RESTART_LAZY);
833 if (retval != ERROR_OK)
834 break;
835 }
836
837 return retval;
838 }
839
840 static int aarch64_resume(struct target *target, int current,
841 target_addr_t address, int handle_breakpoints, int debug_execution)
842 {
843 int retval = 0;
844 uint64_t addr = address;
845
846 struct armv8_common *armv8 = target_to_armv8(target);
847 armv8->last_run_control_op = ARMV8_RUNCONTROL_RESUME;
848
849 if (target->state != TARGET_HALTED)
850 return ERROR_TARGET_NOT_HALTED;
851
852 /*
853 * If this target is part of an SMP group, prepare the other
854 * targets for resuming. This involves restoring the complete
855 * target register context and setting up CTI gates to accept
856 * resume events from the trigger matrix.
857 */
858 if (target->smp) {
859 retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
860 if (retval != ERROR_OK)
861 return retval;
862 }
863
864 /* all targets prepared, restore and restart the current target */
865 retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
866 debug_execution);
867 if (retval == ERROR_OK)
868 retval = aarch64_restart_one(target, RESTART_SYNC);
869 if (retval != ERROR_OK)
870 return retval;
871
872 if (target->smp) {
873 int64_t then = timeval_ms();
874 for (;;) {
875 struct target *curr = target;
876 struct target_list *head;
877 bool all_resumed = true;
878
879 foreach_smp_target(head, target->head) {
880 uint32_t prsr;
881 int resumed;
882
883 curr = head->target;
884 if (curr == target)
885 continue;
886 if (!target_was_examined(curr))
887 continue;
888
889 retval = aarch64_check_state_one(curr,
890 PRSR_SDR, PRSR_SDR, &resumed, &prsr);
891 if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
892 all_resumed = false;
893 break;
894 }
895
896 if (curr->state != TARGET_RUNNING) {
897 curr->state = TARGET_RUNNING;
898 curr->debug_reason = DBG_REASON_NOTHALTED;
899 target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
900 }
901 }
902
903 if (all_resumed)
904 break;
905
906 if (timeval_ms() > then + 1000) {
907 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
908 retval = ERROR_TARGET_TIMEOUT;
909 break;
910 }
911
912 /*
913 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
914 * and it looks like the CTIs are not connected by a common
915 * trigger matrix. It seems that we need to restart one core in each
916 * cluster explicitly. So if we find that a core has not resumed
917 * yet, we trigger an explicit resume for the second cluster.
918 */
919 retval = aarch64_do_restart_one(curr, RESTART_LAZY);
920 if (retval != ERROR_OK)
921 break;
922 }
923 }
924
925 if (retval != ERROR_OK)
926 return retval;
927
928 target->debug_reason = DBG_REASON_NOTHALTED;
929
930 if (!debug_execution) {
931 target->state = TARGET_RUNNING;
932 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
933 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
934 } else {
935 target->state = TARGET_DEBUG_RUNNING;
936 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
937 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
938 }
939
940 return ERROR_OK;
941 }
942
943 static int aarch64_debug_entry(struct target *target)
944 {
945 int retval = ERROR_OK;
946 struct armv8_common *armv8 = target_to_armv8(target);
947 struct arm_dpm *dpm = &armv8->dpm;
948 enum arm_state core_state;
949 uint32_t dscr;
950
951 /* make sure to clear all sticky errors */
952 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
953 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
954 if (retval == ERROR_OK)
955 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
956 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
957 if (retval == ERROR_OK)
958 retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
959
960 if (retval != ERROR_OK)
961 return retval;
962
963 LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);
964
965 dpm->dscr = dscr;
966 core_state = armv8_dpm_get_core_state(dpm);
967 armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
968 armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
969
970 /* close the CTI gate for all events */
971 if (retval == ERROR_OK)
972 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
973 /* discard async exceptions */
974 if (retval == ERROR_OK)
975 retval = dpm->instr_cpsr_sync(dpm);
976 if (retval != ERROR_OK)
977 return retval;
978
979 /* Examine debug reason */
980 armv8_dpm_report_dscr(dpm, dscr);
981
982 /* save address of instruction that triggered the watchpoint? */
983 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
984 uint32_t tmp;
985 uint64_t wfar = 0;
986
987 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
988 armv8->debug_base + CPUV8_DBG_WFAR1,
989 &tmp);
990 if (retval != ERROR_OK)
991 return retval;
992 wfar = tmp;
993 wfar = (wfar << 32);
994 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
995 armv8->debug_base + CPUV8_DBG_WFAR0,
996 &tmp);
997 if (retval != ERROR_OK)
998 return retval;
999 wfar |= tmp;
1000 armv8_dpm_report_wfar(&armv8->dpm, wfar);
1001 }
1002
1003 retval = armv8_dpm_read_current_registers(&armv8->dpm);
1004
1005 if (retval == ERROR_OK && armv8->post_debug_entry)
1006 retval = armv8->post_debug_entry(target);
1007
1008 return retval;
1009 }
1010
1011 static int aarch64_post_debug_entry(struct target *target)
1012 {
1013 struct aarch64_common *aarch64 = target_to_aarch64(target);
1014 struct armv8_common *armv8 = &aarch64->armv8_common;
1015 int retval;
1016 enum arm_mode target_mode = ARM_MODE_ANY;
1017 uint32_t instr;
1018
1019 switch (armv8->arm.core_mode) {
1020 case ARMV8_64_EL0T:
1021 target_mode = ARMV8_64_EL1H;
1022 /* fall through */
1023 case ARMV8_64_EL1T:
1024 case ARMV8_64_EL1H:
1025 instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
1026 break;
1027 case ARMV8_64_EL2T:
1028 case ARMV8_64_EL2H:
1029 instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
1030 break;
1031 case ARMV8_64_EL3H:
1032 case ARMV8_64_EL3T:
1033 instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
1034 break;
1035
1036 case ARM_MODE_SVC:
1037 case ARM_MODE_ABT:
1038 case ARM_MODE_FIQ:
1039 case ARM_MODE_IRQ:
1040 case ARM_MODE_HYP:
1041 case ARM_MODE_SYS:
1042 instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1043 break;
1044
1045 default:
1046 LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
1047 armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
1048 return ERROR_FAIL;
1049 }
1050
1051 if (target_mode != ARM_MODE_ANY)
1052 armv8_dpm_modeswitch(&armv8->dpm, target_mode);
1053
1054 retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
1055 if (retval != ERROR_OK)
1056 return retval;
1057
1058 if (target_mode != ARM_MODE_ANY)
1059 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
1060
1061 LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
1062 aarch64->system_control_reg_curr = aarch64->system_control_reg;
1063
1064 if (armv8->armv8_mmu.armv8_cache.info == -1) {
1065 armv8_identify_cache(armv8);
1066 armv8_read_mpidr(armv8);
1067 }
1068
1069 armv8->armv8_mmu.mmu_enabled =
1070 (aarch64->system_control_reg & 0x1U) ? 1 : 0;
1071 armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
1072 (aarch64->system_control_reg & 0x4U) ? 1 : 0;
1073 armv8->armv8_mmu.armv8_cache.i_cache_enabled =
1074 (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
1075 return ERROR_OK;
1076 }
1077
1078 /*
1079 * single-step a target
1080 */
1081 static int aarch64_step(struct target *target, int current, target_addr_t address,
1082 int handle_breakpoints)
1083 {
1084 struct armv8_common *armv8 = target_to_armv8(target);
1085 struct aarch64_common *aarch64 = target_to_aarch64(target);
1086 int saved_retval = ERROR_OK;
1087 int retval;
1088 uint32_t edecr;
1089
1090 armv8->last_run_control_op = ARMV8_RUNCONTROL_STEP;
1091
1092 if (target->state != TARGET_HALTED) {
1093 LOG_WARNING("target not halted");
1094 return ERROR_TARGET_NOT_HALTED;
1095 }
1096
1097 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1098 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1099 /* make sure EDECR.SS is not set when restoring the register */
1100
1101 if (retval == ERROR_OK) {
1102 edecr &= ~0x4;
1103 /* set EDECR.SS to enter hardware step mode */
1104 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1105 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1106 }
1107 /* disable interrupts while stepping */
1108 if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
1109 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1110 /* bail out if stepping setup has failed */
1111 if (retval != ERROR_OK)
1112 return retval;
1113
1114 if (target->smp && (current == 1)) {
1115 /*
1116 * isolate current target so that it doesn't get resumed
1117 * together with the others
1118 */
1119 retval = arm_cti_gate_channel(armv8->cti, 1);
1120 /* resume all other targets in the group */
1121 if (retval == ERROR_OK)
1122 retval = aarch64_step_restart_smp(target);
1123 if (retval != ERROR_OK) {
1124 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1125 return retval;
1126 }
1127 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1128 }
1129
1130 /* all other targets running, restore and restart the current target */
1131 retval = aarch64_restore_one(target, current, &address, 0, 0);
1132 if (retval == ERROR_OK)
1133 retval = aarch64_restart_one(target, RESTART_LAZY);
1134
1135 if (retval != ERROR_OK)
1136 return retval;
1137
1138 LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
1139 if (!handle_breakpoints)
1140 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1141
1142 int64_t then = timeval_ms();
1143 for (;;) {
1144 int stepped;
1145 uint32_t prsr;
1146
1147 retval = aarch64_check_state_one(target,
1148 PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
1149 if (retval != ERROR_OK || stepped)
1150 break;
1151
1152 if (timeval_ms() > then + 100) {
1153 LOG_ERROR("timeout waiting for target %s halt after step",
1154 target_name(target));
1155 retval = ERROR_TARGET_TIMEOUT;
1156 break;
1157 }
1158 }
1159
1160 /*
1161 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1162 * causes a timeout. The core takes the step but doesn't complete it and so
1163 * debug state is never entered. However, you can manually halt the core
1164 * as an external debug event is also a WFI wakeup event.
1165 */
1166 if (retval == ERROR_TARGET_TIMEOUT)
1167 saved_retval = aarch64_halt_one(target, HALT_SYNC);
1168
1169 /* restore EDECR */
1170 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1171 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1172 if (retval != ERROR_OK)
1173 return retval;
1174
1175 /* restore interrupts */
1176 if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
1177 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1178 if (retval != ERROR_OK)
1179 return retval;
1180 }
1181
1182 if (saved_retval != ERROR_OK)
1183 return saved_retval;
1184
1185 return ERROR_OK;
1186 }
1187
1188 static int aarch64_restore_context(struct target *target, bool bpwp)
1189 {
1190 struct armv8_common *armv8 = target_to_armv8(target);
1191 struct arm *arm = &armv8->arm;
1192
1193 int retval;
1194
1195 LOG_DEBUG("%s", target_name(target));
1196
1197 if (armv8->pre_restore_context)
1198 armv8->pre_restore_context(target);
1199
1200 retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1201 if (retval == ERROR_OK) {
1202 /* registers are now invalid */
1203 register_cache_invalidate(arm->core_cache);
1204 register_cache_invalidate(arm->core_cache->next);
1205 }
1206
1207 return retval;
1208 }
1209
1210 /*
1211 * AArch64 breakpoint and watchpoint functions
1212 */
1213
1214 /* Setup hardware Breakpoint Register Pair */
1215 static int aarch64_set_breakpoint(struct target *target,
1216 struct breakpoint *breakpoint, uint8_t matchmode)
1217 {
1218 int retval;
1219 int brp_i = 0;
1220 uint32_t control;
1221 uint8_t byte_addr_select = 0x0F;
1222 struct aarch64_common *aarch64 = target_to_aarch64(target);
1223 struct armv8_common *armv8 = &aarch64->armv8_common;
1224 struct aarch64_brp *brp_list = aarch64->brp_list;
1225
1226 if (breakpoint->set) {
1227 LOG_WARNING("breakpoint already set");
1228 return ERROR_OK;
1229 }
1230
1231 if (breakpoint->type == BKPT_HARD) {
1232 int64_t bpt_value;
1233 while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
1234 brp_i++;
1235 if (brp_i >= aarch64->brp_num) {
1236 LOG_ERROR("Cannot find free Breakpoint Register Pair");
1237 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1238 }
1239 breakpoint->set = brp_i + 1;
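/* for a 16-bit breakpoint, adjust BAS to match only the addressed halfword */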
1240 if (breakpoint->length == 2)
1241 byte_addr_select = (3 << (breakpoint->address & 0x02));
1242 control = ((matchmode & 0x7) << 20)
1243 | (1 << 13)
1244 | (byte_addr_select << 5)
1245 | (3 << 1) | 1;
1246 brp_list[brp_i].used = 1;
1247 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1248 brp_list[brp_i].control = control;
1249 bpt_value = brp_list[brp_i].value;
1250
1251 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1252 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1253 (uint32_t)(bpt_value & 0xFFFFFFFF));
1254 if (retval != ERROR_OK)
1255 return retval;
1256 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1257 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1258 (uint32_t)(bpt_value >> 32));
1259 if (retval != ERROR_OK)
1260 return retval;
1261
1262 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1263 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1264 brp_list[brp_i].control);
1265 if (retval != ERROR_OK)
1266 return retval;
1267 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1268 brp_list[brp_i].control,
1269 brp_list[brp_i].value);
1270
1271 } else if (breakpoint->type == BKPT_SOFT) {
1272 uint32_t opcode;
1273 uint8_t code[4];
1274
1275 if (armv8_dpm_get_core_state(&armv8->dpm) == ARM_STATE_AARCH64) {
1276 opcode = ARMV8_HLT(11);
1277
1278 if (breakpoint->length != 4)
1279 LOG_ERROR("bug: breakpoint length should be 4 in AArch64 mode");
1280 } else {
1281 /**
1282 * core_state is ARM_STATE_ARM
1283 * in that case the opcode depends on breakpoint length:
1284 * - if length == 4 => A32 opcode
1285 * - if length == 2 => T32 opcode
1286 * - if length == 3 => T32 opcode (refer to gdb doc : ARM-Breakpoint-Kinds)
1287 * in that case the length should be changed from 3 to 4 bytes
1288 **/
1289 opcode = (breakpoint->length == 4) ? ARMV8_HLT_A1(11) :
1290 (uint32_t) (ARMV8_HLT_T1(11) | ARMV8_HLT_T1(11) << 16);
1291
1292 if (breakpoint->length == 3)
1293 breakpoint->length = 4;
1294 }
1295
1296 buf_set_u32(code, 0, 32, opcode);
1297
1298 retval = target_read_memory(target,
1299 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1300 breakpoint->length, 1,
1301 breakpoint->orig_instr);
1302 if (retval != ERROR_OK)
1303 return retval;
1304
1305 armv8_cache_d_inner_flush_virt(armv8,
1306 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1307 breakpoint->length);
1308
1309 retval = target_write_memory(target,
1310 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1311 breakpoint->length, 1, code);
1312 if (retval != ERROR_OK)
1313 return retval;
1314
1315 armv8_cache_d_inner_flush_virt(armv8,
1316 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1317 breakpoint->length);
1318
1319 armv8_cache_i_inner_inval_virt(armv8,
1320 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1321 breakpoint->length);
1322
1323 breakpoint->set = 0x11; /* Any nice value but 0 */
1324 }
1325
1326 /* Ensure that halting debug mode is enabled */
1327 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1328 if (retval != ERROR_OK) {
1329 LOG_DEBUG("Failed to set DSCR.HDE");
1330 return retval;
1331 }
1332
1333 return ERROR_OK;
1334 }
1335
1336 static int aarch64_set_context_breakpoint(struct target *target,
1337 struct breakpoint *breakpoint, uint8_t matchmode)
1338 {
1339 int retval = ERROR_FAIL;
1340 int brp_i = 0;
1341 uint32_t control;
1342 uint8_t byte_addr_select = 0x0F;
1343 struct aarch64_common *aarch64 = target_to_aarch64(target);
1344 struct armv8_common *armv8 = &aarch64->armv8_common;
1345 struct aarch64_brp *brp_list = aarch64->brp_list;
1346
1347 if (breakpoint->set) {
1348 LOG_WARNING("breakpoint already set");
1349 return retval;
1350 }
1351 /*check available context BRPs*/
1352 while ((brp_i < aarch64->brp_num) && (brp_list[brp_i].used ||
1353 (brp_list[brp_i].type != BRP_CONTEXT)))
1354 brp_i++;
1355
1356 if (brp_i >= aarch64->brp_num) {
1357 LOG_ERROR("Cannot find free Breakpoint Register Pair");
1358 return ERROR_FAIL;
1359 }
1360
1361 breakpoint->set = brp_i + 1;
1362 control = ((matchmode & 0x7) << 20)
1363 | (1 << 13)
1364 | (byte_addr_select << 5)
1365 | (3 << 1) | 1;
1366 brp_list[brp_i].used = 1;
1367 brp_list[brp_i].value = (breakpoint->asid);
1368 brp_list[brp_i].control = control;
1369 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1370 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1371 brp_list[brp_i].value);
1372 if (retval != ERROR_OK)
1373 return retval;
1374 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1375 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1376 brp_list[brp_i].control);
1377 if (retval != ERROR_OK)
1378 return retval;
1379 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1380 brp_list[brp_i].control,
1381 brp_list[brp_i].value);
1382 return ERROR_OK;
1383
1384 }
1385
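/*
 * A hybrid breakpoint links a context-ID BRP with an address (IVA) BRP,
 * so that the address match only triggers for the given ASID.
 */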
1386 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1387 {
1388 int retval = ERROR_FAIL;
1389 int brp_1 = 0; /* holds the contextID pair */
1390 int brp_2 = 0; /* holds the IVA pair */
1391 uint32_t control_CTX, control_IVA;
1392 uint8_t CTX_byte_addr_select = 0x0F;
1393 uint8_t IVA_byte_addr_select = 0x0F;
1394 uint8_t CTX_matchmode = 0x03;
1395 uint8_t IVA_matchmode = 0x01;
1396 struct aarch64_common *aarch64 = target_to_aarch64(target);
1397 struct armv8_common *armv8 = &aarch64->armv8_common;
1398 struct aarch64_brp *brp_list = aarch64->brp_list;
1399
1400 if (breakpoint->set) {
1401 LOG_WARNING("breakpoint already set");
1402 return retval;
1403 }
1404 /*check available context BRPs*/
1405 while ((brp_1 < aarch64->brp_num) && (brp_list[brp_1].used ||
1406 (brp_list[brp_1].type != BRP_CONTEXT)))
1407 brp_1++;
1408
1409 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1410 if (brp_1 >= aarch64->brp_num) {
1411 LOG_ERROR("Cannot find free Breakpoint Register Pair");
1412 return ERROR_FAIL;
1413 }
1414
1415 while ((brp_2 < aarch64->brp_num) && (brp_list[brp_2].used ||
1416 (brp_list[brp_2].type != BRP_NORMAL)))
1417 brp_2++;
1418
1419 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1420 if (brp_2 >= aarch64->brp_num) {
1421 LOG_ERROR("Cannot find free Breakpoint Register Pair");
1422 return ERROR_FAIL;
1423 }
1424
1425 breakpoint->set = brp_1 + 1;
1426 breakpoint->linked_BRP = brp_2;
1427 control_CTX = ((CTX_matchmode & 0x7) << 20)
1428 | (brp_2 << 16)
1429 | (0 << 14)
1430 | (CTX_byte_addr_select << 5)
1431 | (3 << 1) | 1;
1432 brp_list[brp_1].used = 1;
1433 brp_list[brp_1].value = (breakpoint->asid);
1434 brp_list[brp_1].control = control_CTX;
1435 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1436 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1437 brp_list[brp_1].value);
1438 if (retval != ERROR_OK)
1439 return retval;
1440 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1441 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1442 brp_list[brp_1].control);
1443 if (retval != ERROR_OK)
1444 return retval;
1445
1446 control_IVA = ((IVA_matchmode & 0x7) << 20)
1447 | (brp_1 << 16)
1448 | (1 << 13)
1449 | (IVA_byte_addr_select << 5)
1450 | (3 << 1) | 1;
1451 brp_list[brp_2].used = 1;
1452 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1453 brp_list[brp_2].control = control_IVA;
1454 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1455 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1456 brp_list[brp_2].value & 0xFFFFFFFF);
1457 if (retval != ERROR_OK)
1458 return retval;
1459 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1460 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1461 brp_list[brp_2].value >> 32);
1462 if (retval != ERROR_OK)
1463 return retval;
1464 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1465 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1466 brp_list[brp_2].control);
1467 if (retval != ERROR_OK)
1468 return retval;
1469
1470 return ERROR_OK;
1471 }
1472
1473 static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1474 {
1475 int retval;
1476 struct aarch64_common *aarch64 = target_to_aarch64(target);
1477 struct armv8_common *armv8 = &aarch64->armv8_common;
1478 struct aarch64_brp *brp_list = aarch64->brp_list;
1479
1480 if (!breakpoint->set) {
1481 LOG_WARNING("breakpoint not set");
1482 return ERROR_OK;
1483 }
1484
1485 if (breakpoint->type == BKPT_HARD) {
1486 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1487 int brp_i = breakpoint->set - 1;
1488 int brp_j = breakpoint->linked_BRP;
1489 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1490 LOG_DEBUG("Invalid BRP number in breakpoint");
1491 return ERROR_OK;
1492 }
1493 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1494 brp_list[brp_i].control, brp_list[brp_i].value);
1495 brp_list[brp_i].used = 0;
1496 brp_list[brp_i].value = 0;
1497 brp_list[brp_i].control = 0;
1498 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1499 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1500 brp_list[brp_i].control);
1501 if (retval != ERROR_OK)
1502 return retval;
1503 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1504 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1505 (uint32_t)brp_list[brp_i].value);
1506 if (retval != ERROR_OK)
1507 return retval;
1508 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1509 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1510 (uint32_t)brp_list[brp_i].value);
1511 if (retval != ERROR_OK)
1512 return retval;
1513 if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
1514 LOG_DEBUG("Invalid BRP number in breakpoint");
1515 return ERROR_OK;
1516 }
1517 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
1518 brp_list[brp_j].control, brp_list[brp_j].value);
1519 brp_list[brp_j].used = 0;
1520 brp_list[brp_j].value = 0;
1521 brp_list[brp_j].control = 0;
1522 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1523 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
1524 brp_list[brp_j].control);
1525 if (retval != ERROR_OK)
1526 return retval;
1527 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1528 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
1529 (uint32_t)brp_list[brp_j].value);
1530 if (retval != ERROR_OK)
1531 return retval;
1532 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1533 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
1534 (uint32_t)brp_list[brp_j].value);
1535 if (retval != ERROR_OK)
1536 return retval;
1537
1538 breakpoint->linked_BRP = 0;
1539 breakpoint->set = 0;
1540 return ERROR_OK;
1541
1542 } else {
1543 int brp_i = breakpoint->set - 1;
1544 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1545 LOG_DEBUG("Invalid BRP number in breakpoint");
1546 return ERROR_OK;
1547 }
1548 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
1549 brp_list[brp_i].control, brp_list[brp_i].value);
1550 brp_list[brp_i].used = 0;
1551 brp_list[brp_i].value = 0;
1552 brp_list[brp_i].control = 0;
1553 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1554 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1555 brp_list[brp_i].control);
1556 if (retval != ERROR_OK)
1557 return retval;
1558 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1559 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1560 brp_list[brp_i].value);
1561 if (retval != ERROR_OK)
1562 return retval;
1563
1564 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1565 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1566 (uint32_t)brp_list[brp_i].value);
1567 if (retval != ERROR_OK)
1568 return retval;
1569 breakpoint->set = 0;
1570 return ERROR_OK;
1571 }
1572 } else {
1573 /* restore original instruction (kept in target endianness) */
1574
1575 armv8_cache_d_inner_flush_virt(armv8,
1576 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1577 breakpoint->length);
1578
1579 if (breakpoint->length == 4) {
1580 retval = target_write_memory(target,
1581 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1582 4, 1, breakpoint->orig_instr);
1583 if (retval != ERROR_OK)
1584 return retval;
1585 } else {
1586 retval = target_write_memory(target,
1587 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1588 2, 1, breakpoint->orig_instr);
1589 if (retval != ERROR_OK)
1590 return retval;
1591 }
1592
1593 armv8_cache_d_inner_flush_virt(armv8,
1594 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1595 breakpoint->length);
1596
1597 armv8_cache_i_inner_inval_virt(armv8,
1598 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1599 breakpoint->length);
1600 }
1601 breakpoint->set = 0;
1602
1603 return ERROR_OK;
1604 }
1605
1606 static int aarch64_add_breakpoint(struct target *target,
1607 struct breakpoint *breakpoint)
1608 {
1609 struct aarch64_common *aarch64 = target_to_aarch64(target);
1610
1611 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1612 LOG_INFO("no hardware breakpoint available");
1613 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1614 }
1615
1616 if (breakpoint->type == BKPT_HARD)
1617 aarch64->brp_num_available--;
1618
1619 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1620 }
1621
1622 static int aarch64_add_context_breakpoint(struct target *target,
1623 struct breakpoint *breakpoint)
1624 {
1625 struct aarch64_common *aarch64 = target_to_aarch64(target);
1626
1627 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1628 LOG_INFO("no hardware breakpoint available");
1629 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1630 }
1631
1632 if (breakpoint->type == BKPT_HARD)
1633 aarch64->brp_num_available--;
1634
1635 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1636 }
1637
1638 static int aarch64_add_hybrid_breakpoint(struct target *target,
1639 struct breakpoint *breakpoint)
1640 {
1641 struct aarch64_common *aarch64 = target_to_aarch64(target);
1642
1643 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1644 LOG_INFO("no hardware breakpoint available");
1645 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1646 }
1647
1648 if (breakpoint->type == BKPT_HARD)
1649 aarch64->brp_num_available--;
1650
1651 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1652 }
1653
1654
1655 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1656 {
1657 struct aarch64_common *aarch64 = target_to_aarch64(target);
1658
1659 #if 0
1660 /* It is perfectly possible to remove breakpoints while the target is running */
1661 if (target->state != TARGET_HALTED) {
1662 LOG_WARNING("target not halted");
1663 return ERROR_TARGET_NOT_HALTED;
1664 }
1665 #endif
1666
1667 if (breakpoint->set) {
1668 aarch64_unset_breakpoint(target, breakpoint);
1669 if (breakpoint->type == BKPT_HARD)
1670 aarch64->brp_num_available++;
1671 }
1672
1673 return ERROR_OK;
1674 }
1675
1676 /*
1677 * AArch64 reset functions
1678 */
1679
1680 static int aarch64_assert_reset(struct target *target)
1681 {
1682 struct armv8_common *armv8 = target_to_armv8(target);
1683
1684 LOG_DEBUG(" ");
1685
1686 /* FIXME when halt is requested, make it work somehow... */
1687
1688 /* Issue some kind of warm reset. */
1689 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1690 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1691 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1692 /* REVISIT handle "pulls" cases, if there's
1693 * hardware that needs them to work.
1694 */
1695 adapter_assert_reset();
1696 } else {
1697 LOG_ERROR("%s: how to reset?", target_name(target));
1698 return ERROR_FAIL;
1699 }
1700
1701 /* registers are now invalid */
1702 if (target_was_examined(target)) {
1703 register_cache_invalidate(armv8->arm.core_cache);
1704 register_cache_invalidate(armv8->arm.core_cache->next);
1705 }
1706
1707 target->state = TARGET_RESET;
1708
1709 return ERROR_OK;
1710 }
1711
1712 static int aarch64_deassert_reset(struct target *target)
1713 {
1714 int retval;
1715
1716 LOG_DEBUG(" ");
1717
1718 /* be certain SRST is off */
1719 adapter_deassert_reset();
1720
1721 if (!target_was_examined(target))
1722 return ERROR_OK;
1723
1724 retval = aarch64_poll(target);
1725 if (retval != ERROR_OK)
1726 return retval;
1727
1728 retval = aarch64_init_debug_access(target);
1729 if (retval != ERROR_OK)
1730 return retval;
1731
1732 if (target->reset_halt) {
1733 if (target->state != TARGET_HALTED) {
1734 LOG_WARNING("%s: ran after reset and before halt ...",
1735 target_name(target));
1736 retval = target_halt(target);
1737 }
1738 }
1739
1740 return retval;
1741 }
1742
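/*
 * Slow path: transfer one unit at a time through DTRRX, letting the PE
 * execute a store instruction for each unit; works for any access size.
 */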
1743 static int aarch64_write_cpu_memory_slow(struct target *target,
1744 uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1745 {
1746 struct armv8_common *armv8 = target_to_armv8(target);
1747 struct arm_dpm *dpm = &armv8->dpm;
1748 struct arm *arm = &armv8->arm;
1749 int retval;
1750
1751 armv8_reg_current(arm, 1)->dirty = true;
1752
1753 /* change DCC to normal mode if necessary */
1754 if (*dscr & DSCR_MA) {
1755 *dscr &= ~DSCR_MA;
1756 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1757 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1758 if (retval != ERROR_OK)
1759 return retval;
1760 }
1761
1762 while (count) {
1763 uint32_t data, opcode;
1764
1765 /* write the data to store into DTRRX */
1766 if (size == 1)
1767 data = *buffer;
1768 else if (size == 2)
1769 data = target_buffer_get_u16(target, buffer);
1770 else
1771 data = target_buffer_get_u32(target, buffer);
1772 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1773 armv8->debug_base + CPUV8_DBG_DTRRX, data);
1774 if (retval != ERROR_OK)
1775 return retval;
1776
1777 if (arm->core_state == ARM_STATE_AARCH64)
1778 retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
1779 else
1780 retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
1781 if (retval != ERROR_OK)
1782 return retval;
1783
1784 if (size == 1)
1785 opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
1786 else if (size == 2)
1787 opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
1788 else
1789 opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
1790 retval = dpm->instr_execute(dpm, opcode);
1791 if (retval != ERROR_OK)
1792 return retval;
1793
1794 /* Advance */
1795 buffer += size;
1796 --count;
1797 }
1798
1799 return ERROR_OK;
1800 }
1801
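/*
 * Fast path: switch the DCC to memory-access mode, so that every write
 * to DTRRX is stored by the PE and the address auto-incremented;
 * only usable for aligned 32-bit accesses.
 */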
1802 static int aarch64_write_cpu_memory_fast(struct target *target,
1803 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1804 {
1805 struct armv8_common *armv8 = target_to_armv8(target);
1806 struct arm *arm = &armv8->arm;
1807 int retval;
1808
1809 armv8_reg_current(arm, 1)->dirty = true;
1810
1811 /* Step 1.d - Change DCC to memory mode */
1812 *dscr |= DSCR_MA;
1813 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1814 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1815 if (retval != ERROR_OK)
1816 return retval;
1817
1818
1819 /* Step 2.a - Do the write */
1820 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1821 buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
1822 if (retval != ERROR_OK)
1823 return retval;
1824
1825 /* Step 3.a - Switch DTR mode back to Normal mode */
1826 *dscr &= ~DSCR_MA;
1827 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1828 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1829 if (retval != ERROR_OK)
1830 return retval;
1831
1832 return ERROR_OK;
1833 }
1834
1835 static int aarch64_write_cpu_memory(struct target *target,
1836 uint64_t address, uint32_t size,
1837 uint32_t count, const uint8_t *buffer)
1838 {
1839 /* write memory through APB-AP */
1840 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1841 struct armv8_common *armv8 = target_to_armv8(target);
1842 struct arm_dpm *dpm = &armv8->dpm;
1843 struct arm *arm = &armv8->arm;
1844 uint32_t dscr;
1845
1846 if (target->state != TARGET_HALTED) {
1847 LOG_WARNING("target not halted");
1848 return ERROR_TARGET_NOT_HALTED;
1849 }
1850
1851 /* Mark register X0 as dirty, as it will be used
1852 * for transferring the data.
1853 * It will be restored automatically when exiting
1854 * debug mode
1855 */
1856 armv8_reg_current(arm, 0)->dirty = true;
1857
1858 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1859
1860 /* Read DSCR */
1861 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1862 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1863 if (retval != ERROR_OK)
1864 return retval;
1865
1866 /* Set Normal access mode */
1867 dscr = (dscr & ~DSCR_MA);
1868 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1869 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1870 if (retval != ERROR_OK)
1871 return retval;
1872
1873 if (arm->core_state == ARM_STATE_AARCH64) {
1874 /* Write X0 with value 'address' using write procedure */
1875 /* Step 1.a+b - Write the address for write access into DBGDTR_EL0 */
1876 /* Step 1.c - Copy value from DTR to X0 using instruction mrs x0, DBGDTR_EL0 */
1877 retval = dpm->instr_write_data_dcc_64(dpm,
1878 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
1879 } else {
1880 /* Write R0 with value 'address' using write procedure */
1881 /* Step 1.a+b - Write the address for write access into DBGDTRRX */
1882 /* Step 1.c - Copy value from DTR to R0 using instruction mrc p14, 0, r0, c0, c5, 0 (DBGDTRRXint) */
1883 retval = dpm->instr_write_data_dcc(dpm,
1884 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
1885 }
1886
1887 if (retval != ERROR_OK)
1888 return retval;
1889
1890 if (size == 4 && (address % 4) == 0)
1891 retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
1892 else
1893 retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);
1894
1895 if (retval != ERROR_OK) {
1896 /* Unset DTR mode */
1897 mem_ap_read_atomic_u32(armv8->debug_ap,
1898 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1899 dscr &= ~DSCR_MA;
1900 mem_ap_write_atomic_u32(armv8->debug_ap,
1901 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1902 }
1903
1904 /* Check for sticky abort flags in the DSCR */
1905 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1906 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1907 if (retval != ERROR_OK)
1908 return retval;
1909
1910 dpm->dscr = dscr;
1911 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1912 /* Abort occurred - clear it and exit */
1913 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1914 armv8_dpm_handle_exception(dpm, true);
1915 return ERROR_FAIL;
1916 }
1917
1918 /* Done */
1919 return ERROR_OK;
1920 }
1921
1922 static int aarch64_read_cpu_memory_slow(struct target *target,
1923 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
1924 {
1925 struct armv8_common *armv8 = target_to_armv8(target);
1926 struct arm_dpm *dpm = &armv8->dpm;
1927 struct arm *arm = &armv8->arm;
1928 int retval;
1929
1930 armv8_reg_current(arm, 1)->dirty = true;
1931
1932 /* change DCC to normal mode (if necessary) */
1933 if (*dscr & DSCR_MA) {
1934 *dscr &= ~DSCR_MA;
1935 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1936 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1937 if (retval != ERROR_OK)
1938 return retval;
1939 }
1940
1941 while (count) {
1942 uint32_t opcode, data;
1943
1944 if (size == 1)
1945 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
1946 else if (size == 2)
1947 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
1948 else
1949 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
1950 retval = dpm->instr_execute(dpm, opcode);
1951 if (retval != ERROR_OK)
1952 return retval;
1953
1954 if (arm->core_state == ARM_STATE_AARCH64)
1955 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
1956 else
1957 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1958 if (retval != ERROR_OK)
1959 return retval;
1960
1961 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1962 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
1963 if (retval != ERROR_OK)
1964 return retval;
1965
1966 if (size == 1)
1967 *buffer = (uint8_t)data;
1968 else if (size == 2)
1969 target_buffer_set_u16(target, buffer, (uint16_t)data);
1970 else
1971 target_buffer_set_u32(target, buffer, data);
1972
1973 /* Advance */
1974 buffer += size;
1975 --count;
1976 }
1977
1978 return ERROR_OK;
1979 }
1980
1981 static int aarch64_read_cpu_memory_fast(struct target *target,
1982 uint32_t count, uint8_t *buffer, uint32_t *dscr)
1983 {
1984 struct armv8_common *armv8 = target_to_armv8(target);
1985 struct arm_dpm *dpm = &armv8->dpm;
1986 struct arm *arm = &armv8->arm;
1987 int retval;
1988 uint32_t value;
1989
1990 /* Mark X1 as dirty */
1991 armv8_reg_current(arm, 1)->dirty = true;
1992
1993 if (arm->core_state == ARM_STATE_AARCH64) {
1994 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1995 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
1996 } else {
1997 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1998 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1999 }
2000
2001 if (retval != ERROR_OK)
2002 return retval;
2003
2004 /* Step 1.e - Change DCC to memory mode */
2005 *dscr |= DSCR_MA;
2006 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2007 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2008 if (retval != ERROR_OK)
2009 return retval;
2010
2011 /* Step 1.f - read DBGDTRTX and discard the value */
2012 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2013 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2014 if (retval != ERROR_OK)
2015 return retval;
2016
2017 count--;
2018 /* Read the data - each read of the DTRTX register causes the load instruction to be reissued.
2019 * Abort flags are sticky, so they can be checked once at the end of the transaction.
2020 *
2021 * The data is read in 32-bit aligned chunks.
2022 */
2023
2024 if (count) {
2025 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
2026 * increments X0 by 4. */
2027 retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
2028 armv8->debug_base + CPUV8_DBG_DTRTX);
2029 if (retval != ERROR_OK)
2030 return retval;
2031 }
2032
2033 /* Step 3.a - set DTR access mode back to Normal mode */
2034 *dscr &= ~DSCR_MA;
2035 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2036 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2037 if (retval != ERROR_OK)
2038 return retval;
2039
2040 /* Step 3.b - read DBGDTRTX for the final value */
2041 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2042 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2043 if (retval != ERROR_OK)
2044 return retval;
2045
2046 target_buffer_set_u32(target, buffer + count * 4, value);
2047 return retval;
2048 }
2049
2050 static int aarch64_read_cpu_memory(struct target *target,
2051 target_addr_t address, uint32_t size,
2052 uint32_t count, uint8_t *buffer)
2053 {
2054 /* read memory through APB-AP */
2055 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2056 struct armv8_common *armv8 = target_to_armv8(target);
2057 struct arm_dpm *dpm = &armv8->dpm;
2058 struct arm *arm = &armv8->arm;
2059 uint32_t dscr;
2060
2061 LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
2062 address, size, count);
2063
2064 if (target->state != TARGET_HALTED) {
2065 LOG_WARNING("target not halted");
2066 return ERROR_TARGET_NOT_HALTED;
2067 }
2068
2069 /* Mark register X0 as dirty, as it will be used
2070 * for transferring the data.
2071 * It will be restored automatically when exiting
2072 * debug mode
2073 */
2074 armv8_reg_current(arm, 0)->dirty = true;
2075
2076 /* Read DSCR */
2077 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2078 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2079 if (retval != ERROR_OK)
2080 return retval;
2081
2082 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2083
2084 /* Set Normal access mode */
2085 dscr &= ~DSCR_MA;
2086 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2087 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2088 if (retval != ERROR_OK)
2089 return retval;
2090
2091 if (arm->core_state == ARM_STATE_AARCH64) {
2092 /* Write X0 with value 'address' using write procedure */
2093 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2094 /* Step 1.c - Copy value from DTR to X0 using instruction mrs x0, DBGDTR_EL0 */
2095 retval = dpm->instr_write_data_dcc_64(dpm,
2096 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
2097 } else {
2098 /* Write R0 with value 'address' using write procedure */
2099 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2100 /* Step 1.c - Copy value from DTR to R0 using instruction mrc p14, 0, r0, c0, c5, 0 (DBGDTRRXint) */
2101 retval = dpm->instr_write_data_dcc(dpm,
2102 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
2103 }
2104
2105 if (retval != ERROR_OK)
2106 return retval;
2107
2108 if (size == 4 && (address % 4) == 0)
2109 retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
2110 else
2111 retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2112
2113 if (dscr & DSCR_MA) {
2114 dscr &= ~DSCR_MA;
2115 mem_ap_write_atomic_u32(armv8->debug_ap,
2116 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2117 }
2118
2119 if (retval != ERROR_OK)
2120 return retval;
2121
2122 /* Check for sticky abort flags in the DSCR */
2123 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2124 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2125 if (retval != ERROR_OK)
2126 return retval;
2127
2128 dpm->dscr = dscr;
2129
2130 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2131 /* Abort occurred - clear it and exit */
2132 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2133 armv8_dpm_handle_exception(dpm, true);
2134 return ERROR_FAIL;
2135 }
2136
2137 /* Done */
2138 return ERROR_OK;
2139 }
2140
2141 static int aarch64_read_phys_memory(struct target *target,
2142 target_addr_t address, uint32_t size,
2143 uint32_t count, uint8_t *buffer)
2144 {
2145 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2146
2147 if (count && buffer) {
2148 /* read memory through APB-AP */
2149 retval = aarch64_mmu_modify(target, 0);
2150 if (retval != ERROR_OK)
2151 return retval;
2152 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2153 }
2154 return retval;
2155 }
2156
2157 static int aarch64_read_memory(struct target *target, target_addr_t address,
2158 uint32_t size, uint32_t count, uint8_t *buffer)
2159 {
2160 int mmu_enabled = 0;
2161 int retval;
2162
2163 /* determine if MMU was enabled on target stop */
2164 retval = aarch64_mmu(target, &mmu_enabled);
2165 if (retval != ERROR_OK)
2166 return retval;
2167
2168 if (mmu_enabled) {
2169 /* enable MMU as we could have disabled it for phys access */
2170 retval = aarch64_mmu_modify(target, 1);
2171 if (retval != ERROR_OK)
2172 return retval;
2173 }
2174 return aarch64_read_cpu_memory(target, address, size, count, buffer);
2175 }
2176
2177 static int aarch64_write_phys_memory(struct target *target,
2178 target_addr_t address, uint32_t size,
2179 uint32_t count, const uint8_t *buffer)
2180 {
2181 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2182
2183 if (count && buffer) {
2184 /* write memory through APB-AP */
2185 retval = aarch64_mmu_modify(target, 0);
2186 if (retval != ERROR_OK)
2187 return retval;
2188 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2189 }
2190
2191 return retval;
2192 }
2193
2194 static int aarch64_write_memory(struct target *target, target_addr_t address,
2195 uint32_t size, uint32_t count, const uint8_t *buffer)
2196 {
2197 int mmu_enabled = 0;
2198 int retval;
2199
2200 /* determine if MMU was enabled on target stop */
2201 retval = aarch64_mmu(target, &mmu_enabled);
2202 if (retval != ERROR_OK)
2203 return retval;
2204
2205 if (mmu_enabled) {
2206 /* enable MMU as we could have disabled it for phys access */
2207 retval = aarch64_mmu_modify(target, 1);
2208 if (retval != ERROR_OK)
2209 return retval;
2210 }
2211 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2212 }
2213
2214 static int aarch64_handle_target_request(void *priv)
2215 {
2216 struct target *target = priv;
2217 struct armv8_common *armv8 = target_to_armv8(target);
2218 int retval;
2219
2220 if (!target_was_examined(target))
2221 return ERROR_OK;
2222 if (!target->dbg_msg_enabled)
2223 return ERROR_OK;
2224
2225 if (target->state == TARGET_RUNNING) {
2226 uint32_t request;
2227 uint32_t dscr;
2228 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2229 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2230
2231 /* check if we have data */
2232 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2233 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2234 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2235 if (retval == ERROR_OK) {
2236 target_request(target, request);
2237 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2238 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2239 }
2240 }
2241 }
2242
2243 return ERROR_OK;
2244 }
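/* The callback above drains the DCC channel: while DSCR_DTR_TX_FULL is set,
 * each word the running target wrote to its DTRTX register is forwarded to
 * the generic target_request() debug-message layer. */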
2245
2246 static int aarch64_examine_first(struct target *target)
2247 {
2248 struct aarch64_common *aarch64 = target_to_aarch64(target);
2249 struct armv8_common *armv8 = &aarch64->armv8_common;
2250 struct adiv5_dap *swjdp = armv8->arm.dap;
2251 struct aarch64_private_config *pc = target->private_config;
2252 int i;
2253 int retval = ERROR_OK;
2254 uint64_t debug, ttypr;
2255 uint32_t cpuid;
2256 uint32_t tmp0, tmp1, tmp2, tmp3;
2257 debug = ttypr = cpuid = 0;
2258
2259 if (pc == NULL)
2260 return ERROR_FAIL;
2261
2262 if (pc->adiv5_config.ap_num == DP_APSEL_INVALID) {
2263 /* Search for the APB-AP */
2264 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2265 if (retval != ERROR_OK) {
2266 LOG_ERROR("Could not find APB-AP for debug access");
2267 return retval;
2268 }
2269 } else {
2270 armv8->debug_ap = dap_ap(swjdp, pc->adiv5_config.ap_num);
2271 }
2272
2273 retval = mem_ap_init(armv8->debug_ap);
2274 if (retval != ERROR_OK) {
2275 LOG_ERROR("Could not initialize the APB-AP");
2276 return retval;
2277 }
2278
2279 armv8->debug_ap->memaccess_tck = 10;
2280
2281 if (!target->dbgbase_set) {
2282 uint32_t dbgbase;
2283 /* Get ROM Table base */
2284 uint32_t apid;
2285 int32_t coreidx = target->coreid;
2286 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2287 if (retval != ERROR_OK)
2288 return retval;
2289 /* Lookup 0x15 -- Processor DAP */
2290 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2291 &armv8->debug_base, &coreidx);
2292 if (retval != ERROR_OK)
2293 return retval;
2294 LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT
2295 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2296 } else
2297 armv8->debug_base = target->dbgbase;
2298
2299 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2300 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2301 if (retval != ERROR_OK) {
2302 LOG_DEBUG("Examine %s failed", "oslock");
2303 return retval;
2304 }
2305
2306 retval = mem_ap_read_u32(armv8->debug_ap,
2307 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2308 if (retval != ERROR_OK) {
2309 LOG_DEBUG("Examine %s failed", "CPUID");
2310 return retval;
2311 }
2312
2313 retval = mem_ap_read_u32(armv8->debug_ap,
2314 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2315 retval += mem_ap_read_u32(armv8->debug_ap,
2316 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2317 if (retval != ERROR_OK) {
2318 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2319 return retval;
2320 }
2321 retval = mem_ap_read_u32(armv8->debug_ap,
2322 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2323 retval += mem_ap_read_u32(armv8->debug_ap,
2324 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2325 if (retval != ERROR_OK) {
2326 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2327 return retval;
2328 }
2329
2330 retval = dap_run(armv8->debug_ap->dap);
2331 if (retval != ERROR_OK) {
2332 LOG_ERROR("%s: examination failed", target_name(target));
2333 return retval;
2334 }
2335
2336 ttypr |= tmp1;
2337 ttypr = (ttypr << 32) | tmp0;
2338 debug |= tmp3;
2339 debug = (debug << 32) | tmp2;
2340
2341 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2342 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2343 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2344
2345 if (pc->cti == NULL)
2346 return ERROR_FAIL;
2347
2348 armv8->cti = pc->cti;
2349
2350 retval = aarch64_dpm_setup(aarch64, debug);
2351 if (retval != ERROR_OK)
2352 return retval;
2353
2354 /* Setup Breakpoint Register Pairs */
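/* ID_AA64DFR0_EL1.BRPs (bits [15:12]) and .CTX_CMPs (bits [31:28]) hold the
 * number of implemented comparators minus one, hence the "+ 1" below. */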
2355 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2356 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2357 aarch64->brp_num_available = aarch64->brp_num;
2358 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
if (aarch64->brp_list == NULL) {
LOG_ERROR("Out of memory");
return ERROR_FAIL;
}
2359 for (i = 0; i < aarch64->brp_num; i++) {
2360 aarch64->brp_list[i].used = 0;
2361 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2362 aarch64->brp_list[i].type = BRP_NORMAL;
2363 else
2364 aarch64->brp_list[i].type = BRP_CONTEXT;
2365 aarch64->brp_list[i].value = 0;
2366 aarch64->brp_list[i].control = 0;
2367 aarch64->brp_list[i].BRPn = i;
2368 }
2369
2370 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2371
2372 target->state = TARGET_UNKNOWN;
2373 target->debug_reason = DBG_REASON_NOTHALTED;
2374 aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2375 target_set_examined(target);
2376 return ERROR_OK;
2377 }
2378
2379 static int aarch64_examine(struct target *target)
2380 {
2381 int retval = ERROR_OK;
2382
2383 /* don't re-probe hardware after each reset */
2384 if (!target_was_examined(target))
2385 retval = aarch64_examine_first(target);
2386
2387 /* Configure core debug access */
2388 if (retval == ERROR_OK)
2389 retval = aarch64_init_debug_access(target);
2390
2391 return retval;
2392 }
2393
2394 /*
2395 * aarch64 target creation and initialization
2396 */
2397
2398 static int aarch64_init_target(struct command_context *cmd_ctx,
2399 struct target *target)
2400 {
2401 /* examine_first() does a bunch of this */
2402 arm_semihosting_init(target);
2403 return ERROR_OK;
2404 }
2405
2406 static int aarch64_init_arch_info(struct target *target,
2407 struct aarch64_common *aarch64, struct adiv5_dap *dap)
2408 {
2409 struct armv8_common *armv8 = &aarch64->armv8_common;
2410
2411 /* Setup struct aarch64_common */
2412 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2413 armv8->arm.dap = dap;
2414
2415 /* register arch-specific functions */
2416 armv8->examine_debug_reason = NULL;
2417 armv8->post_debug_entry = aarch64_post_debug_entry;
2418 armv8->pre_restore_context = NULL;
2419 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2420
2421 armv8_init_arch_info(target, armv8);
2422 target_register_timer_callback(aarch64_handle_target_request, 1,
2423 TARGET_TIMER_TYPE_PERIODIC, target);
2424
2425 return ERROR_OK;
2426 }
2427
2428 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2429 {
2430 struct aarch64_private_config *pc = target->private_config;
2431 struct aarch64_common *aarch64;
2432
2433 if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2434 return ERROR_FAIL;
2435
2436 aarch64 = calloc(1, sizeof(struct aarch64_common));
2437 if (aarch64 == NULL) {
2438 LOG_ERROR("Out of memory");
2439 return ERROR_FAIL;
2440 }
2441
2442 return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2443 }
2444
2445 static void aarch64_deinit_target(struct target *target)
2446 {
2447 struct aarch64_common *aarch64 = target_to_aarch64(target);
2448 struct armv8_common *armv8 = &aarch64->armv8_common;
2449 struct arm_dpm *dpm = &armv8->dpm;
2450
2451 armv8_free_reg_cache(target);
2452 free(aarch64->brp_list);
2453 free(dpm->dbp);
2454 free(dpm->dwp);
2455 free(target->private_config);
2456 free(aarch64);
2457 }
2458
2459 static int aarch64_mmu(struct target *target, int *enabled)
2460 {
2461 if (target->state != TARGET_HALTED) {
2462 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2463 return ERROR_TARGET_INVALID;
2464 }
2465
2466 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2467 return ERROR_OK;
2468 }
2469
2470 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2471 target_addr_t *phys)
2472 {
2473 return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2474 }
2475
2476 /*
2477 * private target configuration items
2478 */
2479 enum aarch64_cfg_param {
2480 CFG_CTI,
2481 };
2482
2483 static const Jim_Nvp nvp_config_opts[] = {
2484 { .name = "-cti", .value = CFG_CTI },
2485 { .name = NULL, .value = -1 }
2486 };
2487
2488 static int aarch64_jim_configure(struct target *target, Jim_GetOptInfo *goi)
2489 {
2490 struct aarch64_private_config *pc;
2491 Jim_Nvp *n;
2492 int e;
2493
2494 pc = (struct aarch64_private_config *)target->private_config;
2495 if (pc == NULL) {
2496 pc = calloc(1, sizeof(struct aarch64_private_config));
if (pc == NULL) {
LOG_ERROR("Out of memory");
return JIM_ERR;
}
2497 pc->adiv5_config.ap_num = DP_APSEL_INVALID;
2498 target->private_config = pc;
2499 }
2500
2501 /*
2502 * Call adiv5_jim_configure() to parse the common DAP options
2503 * It will return JIM_CONTINUE if it didn't find any known
2504 * options, JIM_OK if it correctly parsed the topmost option
2505 * and JIM_ERR if an error occurred during parameter evaluation.
2506 * For JIM_CONTINUE, we check our own params.
2507 *
2508 * adiv5_jim_configure() assumes 'private_config' to point to
2509 * 'struct adiv5_private_config'. Override 'private_config'!
2510 */
2511 target->private_config = &pc->adiv5_config;
2512 e = adiv5_jim_configure(target, goi);
2513 target->private_config = pc;
2514 if (e != JIM_CONTINUE)
2515 return e;
2516
2517 /* parse config or cget options ... */
2518 if (goi->argc > 0) {
2519 Jim_SetEmptyResult(goi->interp);
2520
2521 /* check first if topmost item is for us */
2522 e = Jim_Nvp_name2value_obj(goi->interp, nvp_config_opts,
2523 goi->argv[0], &n);
2524 if (e != JIM_OK)
2525 return JIM_CONTINUE;
2526
2527 e = Jim_GetOpt_Obj(goi, NULL);
2528 if (e != JIM_OK)
2529 return e;
2530
2531 switch (n->value) {
2532 case CFG_CTI: {
2533 if (goi->isconfigure) {
2534 Jim_Obj *o_cti;
2535 struct arm_cti *cti;
2536 e = Jim_GetOpt_Obj(goi, &o_cti);
2537 if (e != JIM_OK)
2538 return e;
2539 cti = cti_instance_by_jim_obj(goi->interp, o_cti);
2540 if (cti == NULL) {
2541 Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
2542 return JIM_ERR;
2543 }
2544 pc->cti = cti;
2545 } else {
2546 if (goi->argc != 0) {
2547 Jim_WrongNumArgs(goi->interp,
2548 goi->argc, goi->argv,
2549 "NO PARAMS");
2550 return JIM_ERR;
2551 }
2552
2553 if (pc == NULL || pc->cti == NULL) {
2554 Jim_SetResultString(goi->interp, "CTI not configured", -1);
2555 return JIM_ERR;
2556 }
2557 Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
2558 }
2559 break;
2560 }
2561
2562 default:
2563 return JIM_CONTINUE;
2564 }
2565 }
2566
2567 return JIM_OK;
2568 }
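/* Typical configuration (a minimal sketch; the DAP/CTI names and the CTI
 * base address are hypothetical and come from the board script):
 *
 *   cti create $_CHIPNAME.cti -dap $_CHIPNAME.dap -ap-num 0 -baseaddr 0x80220000
 *   target create $_CHIPNAME.core aarch64 -dap $_CHIPNAME.dap -cti $_CHIPNAME.cti
 *
 * The '-cti' option is consumed here; '-dap' and '-ap-num' are handled by
 * adiv5_jim_configure() above. */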
2569
2570 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2571 {
2572 struct target *target = get_current_target(CMD_CTX);
2573 struct armv8_common *armv8 = target_to_armv8(target);
2574
2575 return armv8_handle_cache_info_command(CMD,
2576 &armv8->armv8_mmu.armv8_cache);
2577 }
2578
2579 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2580 {
2581 struct target *target = get_current_target(CMD_CTX);
2582 if (!target_was_examined(target)) {
2583 LOG_ERROR("target not examined yet");
2584 return ERROR_FAIL;
2585 }
2586
2587 return aarch64_init_debug_access(target);
2588 }
2589
2590 COMMAND_HANDLER(aarch64_handle_disassemble_command)
2591 {
2592 struct target *target = get_current_target(CMD_CTX);
2593
2594 if (target == NULL) {
2595 LOG_ERROR("No target selected");
2596 return ERROR_FAIL;
2597 }
2598
2599 struct aarch64_common *aarch64 = target_to_aarch64(target);
2600
2601 if (aarch64->common_magic != AARCH64_COMMON_MAGIC) {
2602 command_print(CMD, "current target isn't an AArch64");
2603 return ERROR_FAIL;
2604 }
2605
2606 int count = 1;
2607 target_addr_t address;
2608
2609 switch (CMD_ARGC) {
2610 case 2:
2611 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
2612 /* FALL THROUGH */
2613 case 1:
2614 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
2615 break;
2616 default:
2617 return ERROR_COMMAND_SYNTAX_ERROR;
2618 }
2619
2620 return a64_disassemble(CMD, target, address, count);
2621 }
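/* Example (the address is hypothetical):
 *   aarch64 disassemble 0x80001000 4
 * prints four A64 instructions starting at 0x80001000. */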
2622
2623 COMMAND_HANDLER(aarch64_mask_interrupts_command)
2624 {
2625 struct target *target = get_current_target(CMD_CTX);
2626 struct aarch64_common *aarch64 = target_to_aarch64(target);
2627
2628 static const Jim_Nvp nvp_maskisr_modes[] = {
2629 { .name = "off", .value = AARCH64_ISRMASK_OFF },
2630 { .name = "on", .value = AARCH64_ISRMASK_ON },
2631 { .name = NULL, .value = -1 },
2632 };
2633 const Jim_Nvp *n;
2634
2635 if (CMD_ARGC > 0) {
2636 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2637 if (n->name == NULL) {
2638 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2639 return ERROR_COMMAND_SYNTAX_ERROR;
2640 }
2641
2642 aarch64->isrmasking_mode = n->value;
2643 }
2644
2645 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
2646 command_print(CMD, "aarch64 interrupt mask %s", n->name);
2647
2648 return ERROR_OK;
2649 }
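/* Example:
 *   aarch64 maskisr on
 * masks interrupts during single-step; with no argument the current setting
 * is printed. */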
2650
2651 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
2652 {
2653 struct command_context *context;
2654 struct target *target;
2655 struct arm *arm;
2656 int retval;
2657 bool is_mcr = false;
2658 int arg_cnt = 0;
2659
2660 if (Jim_CompareStringImmediate(interp, argv[0], "mcr")) {
2661 is_mcr = true;
2662 arg_cnt = 7;
2663 } else {
2664 arg_cnt = 6;
2665 }
2666
2667 context = current_command_context(interp);
2668 assert(context != NULL);
2669
2670 target = get_current_target(context);
2671 if (target == NULL) {
2672 LOG_ERROR("%s: no current target", __func__);
2673 return JIM_ERR;
2674 }
2675 if (!target_was_examined(target)) {
2676 LOG_ERROR("%s: not yet examined", target_name(target));
2677 return JIM_ERR;
2678 }
2679
2680 arm = target_to_arm(target);
2681 if (!is_arm(arm)) {
2682 LOG_ERROR("%s: not an ARM", target_name(target));
2683 return JIM_ERR;
2684 }
2685
2686 if (target->state != TARGET_HALTED) {
LOG_ERROR("%s: not halted", target_name(target));
/* a Jim handler must return a JIM_* status, not an OpenOCD ERROR_* code */
return JIM_ERR;
}
2688
2689 if (arm->core_state == ARM_STATE_AARCH64) {
2690 LOG_ERROR("%s: not 32-bit arm target", target_name(target));
2691 return JIM_ERR;
2692 }
2693
2694 if (argc != arg_cnt) {
2695 LOG_ERROR("%s: wrong number of arguments", __func__);
2696 return JIM_ERR;
2697 }
2698
2699 int cpnum;
2700 uint32_t op1;
2701 uint32_t op2;
2702 uint32_t CRn;
2703 uint32_t CRm;
2704 uint32_t value;
2705 long l;
2706
2707 /* NOTE: parameter sequence matches ARM instruction set usage:
2708 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
2709 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
2710 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
2711 */
2712 retval = Jim_GetLong(interp, argv[1], &l);
2713 if (retval != JIM_OK)
2714 return retval;
2715 if (l & ~0xf) {
2716 LOG_ERROR("%s: %s %d out of range", __func__,
2717 "coprocessor", (int) l);
2718 return JIM_ERR;
2719 }
2720 cpnum = l;
2721
2722 retval = Jim_GetLong(interp, argv[2], &l);
2723 if (retval != JIM_OK)
2724 return retval;
2725 if (l & ~0x7) {
2726 LOG_ERROR("%s: %s %d out of range", __func__,
2727 "op1", (int) l);
2728 return JIM_ERR;
2729 }
2730 op1 = l;
2731
2732 retval = Jim_GetLong(interp, argv[3], &l);
2733 if (retval != JIM_OK)
2734 return retval;
2735 if (l & ~0xf) {
2736 LOG_ERROR("%s: %s %d out of range", __func__,
2737 "CRn", (int) l);
2738 return JIM_ERR;
2739 }
2740 CRn = l;
2741
2742 retval = Jim_GetLong(interp, argv[4], &l);
2743 if (retval != JIM_OK)
2744 return retval;
2745 if (l & ~0xf) {
2746 LOG_ERROR("%s: %s %d out of range", __func__,
2747 "CRm", (int) l);
2748 return JIM_ERR;
2749 }
2750 CRm = l;
2751
2752 retval = Jim_GetLong(interp, argv[5], &l);
2753 if (retval != JIM_OK)
2754 return retval;
2755 if (l & ~0x7) {
2756 LOG_ERROR("%s: %s %d out of range", __func__,
2757 "op2", (int) l);
2758 return JIM_ERR;
2759 }
2760 op2 = l;
2761
2762 value = 0;
2763
2764 if (is_mcr) {
2765 retval = Jim_GetLong(interp, argv[6], &l);
2766 if (retval != JIM_OK)
2767 return retval;
2768 value = l;
2769
2770 /* NOTE: parameters reordered! */
2771 /* ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2) */
2772 retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
2773 if (retval != ERROR_OK)
2774 return JIM_ERR;
2775 } else {
2776 /* NOTE: parameters reordered! */
2777 /* ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2) */
2778 retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
2779 if (retval != ERROR_OK)
2780 return JIM_ERR;
2781
2782 Jim_SetResult(interp, Jim_NewIntObj(interp, value));
2783 }
2784
2785 return JIM_OK;
2786 }
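/* Example: access the AArch32 SCTLR (cp15, op1=0, CRn=c1, CRm=c0, op2=0);
 * only usable while the core is halted in an AArch32 state:
 *   aarch64 mrc 15 0 1 0 0
 *   aarch64 mcr 15 0 1 0 0 $value
 */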
2787
2788 static const struct command_registration aarch64_exec_command_handlers[] = {
2789 {
2790 .name = "cache_info",
2791 .handler = aarch64_handle_cache_info_command,
2792 .mode = COMMAND_EXEC,
2793 .help = "display information about target caches",
2794 .usage = "",
2795 },
2796 {
2797 .name = "dbginit",
2798 .handler = aarch64_handle_dbginit_command,
2799 .mode = COMMAND_EXEC,
2800 .help = "Initialize core debug",
2801 .usage = "",
2802 },
2803 {
2804 .name = "disassemble",
2805 .handler = aarch64_handle_disassemble_command,
2806 .mode = COMMAND_EXEC,
2807 .help = "Disassemble instructions",
2808 .usage = "address [count]",
2809 },
2810 {
2811 .name = "maskisr",
2812 .handler = aarch64_mask_interrupts_command,
2813 .mode = COMMAND_ANY,
2814 .help = "mask aarch64 interrupts during single-step",
2815 .usage = "['on'|'off']",
2816 },
2817 {
2818 .name = "mcr",
2819 .mode = COMMAND_EXEC,
2820 .jim_handler = jim_mcrmrc,
2821 .help = "write coprocessor register",
2822 .usage = "cpnum op1 CRn CRm op2 value",
2823 },
2824 {
2825 .name = "mrc",
2826 .mode = COMMAND_EXEC,
2827 .jim_handler = jim_mcrmrc,
2828 .help = "read coprocessor register",
2829 .usage = "cpnum op1 CRn CRm op2",
2830 },
2831 {
2832 .chain = smp_command_handlers,
2833 },
2836 COMMAND_REGISTRATION_DONE
2837 };
2838
2839 extern const struct command_registration semihosting_common_handlers[];
2840
2841 static const struct command_registration aarch64_command_handlers[] = {
2842 {
2843 .name = "arm",
2844 .mode = COMMAND_ANY,
2845 .help = "ARM Command Group",
2846 .usage = "",
2847 .chain = semihosting_common_handlers
2848 },
2849 {
2850 .chain = armv8_command_handlers,
2851 },
2852 {
2853 .name = "aarch64",
2854 .mode = COMMAND_ANY,
2855 .help = "AArch64 command group",
2856 .usage = "",
2857 .chain = aarch64_exec_command_handlers,
2858 },
2859 COMMAND_REGISTRATION_DONE
2860 };
2861
2862 struct target_type aarch64_target = {
2863 .name = "aarch64",
2864
2865 .poll = aarch64_poll,
2866 .arch_state = armv8_arch_state,
2867
2868 .halt = aarch64_halt,
2869 .resume = aarch64_resume,
2870 .step = aarch64_step,
2871
2872 .assert_reset = aarch64_assert_reset,
2873 .deassert_reset = aarch64_deassert_reset,
2874
2875 /* REVISIT allow exporting VFP3 registers ... */
2876 .get_gdb_arch = armv8_get_gdb_arch,
2877 .get_gdb_reg_list = armv8_get_gdb_reg_list,
2878
2879 .read_memory = aarch64_read_memory,
2880 .write_memory = aarch64_write_memory,
2881
2882 .add_breakpoint = aarch64_add_breakpoint,
2883 .add_context_breakpoint = aarch64_add_context_breakpoint,
2884 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2885 .remove_breakpoint = aarch64_remove_breakpoint,
2886 .add_watchpoint = NULL,
2887 .remove_watchpoint = NULL,
2888
2889 .commands = aarch64_command_handlers,
2890 .target_create = aarch64_target_create,
2891 .target_jim_configure = aarch64_jim_configure,
2892 .init_target = aarch64_init_target,
2893 .deinit_target = aarch64_deinit_target,
2894 .examine = aarch64_examine,
2895
2896 .read_phys_memory = aarch64_read_phys_memory,
2897 .write_phys_memory = aarch64_write_phys_memory,
2898 .mmu = aarch64_mmu,
2899 .virt2phys = aarch64_virt2phys,
2900 };
