aarch64: add support for "reset halt"
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "a64_disassembler.h"
27 #include "register.h"
28 #include "target_request.h"
29 #include "target_type.h"
30 #include "armv8_opcodes.h"
31 #include "armv8_cache.h"
32 #include "arm_semihosting.h"
33 #include "jtag/interface.h"
34 #include "smp.h"
35 #include <helper/time_support.h>
36
/* How a restart (resume) request completes: fire the CTI event and return,
 * or additionally wait until PRSR reports the PE left debug state */
enum restart_mode {
	RESTART_LAZY,
	RESTART_SYNC,
};

/* How a halt request completes: fire the CTI event and return, or
 * additionally wait until PRSR reports the PE halted */
enum halt_mode {
	HALT_LAZY,
	HALT_SYNC,
};

/* Per-target configuration parsed from the target creation command:
 * standard ADIv5 DAP settings plus the CTI used for halt/restart
 * cross-triggering */
struct aarch64_private_config {
	struct adiv5_private_config adiv5_config;
	struct arm_cti *cti;
};
51
/* forward declarations */
static int aarch64_poll(struct target *target);
static int aarch64_debug_entry(struct target *target);
static int aarch64_restore_context(struct target *target, bool bpwp);
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_mmu(struct target *target, int *enabled);
static int aarch64_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int aarch64_read_cpu_memory(struct target *target,
	uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
68
/*
 * Write the cached system control register back to the core.
 *
 * If aarch64->system_control_reg differs from the value currently
 * programmed (system_control_reg_curr), write it to SCTLR_ELx (AArch64
 * modes) or cp15 c1 (AArch32 modes).  The write goes through the DPM
 * and clobbers r0/x0, so callers run this before restoring the general
 * register context (see aarch64_restore_one()).
 *
 * Returns ERROR_OK on success (or if nothing needed writing),
 * ERROR_FAIL for an unsupported core mode, or a DPM error code.
 */
static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			/* SCTLR_EL1 is not writable from EL0; switch to EL1h first */
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
		case ARM_MODE_HYP:
		case ARM_MODE_SYS:
			/* AArch32: SCTLR lives in cp15 c1 */
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
					armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
			return ERROR_FAIL;
		}

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		/* switch back to the mode the core was halted in */
		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}
127
128 /* modify system_control_reg in order to enable or disable mmu for :
129 * - virt2phys address conversion
130 * - read or write memory in phys or virt address */
131 static int aarch64_mmu_modify(struct target *target, int enable)
132 {
133 struct aarch64_common *aarch64 = target_to_aarch64(target);
134 struct armv8_common *armv8 = &aarch64->armv8_common;
135 int retval = ERROR_OK;
136 uint32_t instr = 0;
137
138 if (enable) {
139 /* if mmu enabled at target stop and mmu not enable */
140 if (!(aarch64->system_control_reg & 0x1U)) {
141 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
142 return ERROR_FAIL;
143 }
144 if (!(aarch64->system_control_reg_curr & 0x1U))
145 aarch64->system_control_reg_curr |= 0x1U;
146 } else {
147 if (aarch64->system_control_reg_curr & 0x4U) {
148 /* data cache is active */
149 aarch64->system_control_reg_curr &= ~0x4U;
150 /* flush data cache armv8 function to be called */
151 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
152 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
153 }
154 if ((aarch64->system_control_reg_curr & 0x1U)) {
155 aarch64->system_control_reg_curr &= ~0x1U;
156 }
157 }
158
159 switch (armv8->arm.core_mode) {
160 case ARMV8_64_EL0T:
161 case ARMV8_64_EL1T:
162 case ARMV8_64_EL1H:
163 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
164 break;
165 case ARMV8_64_EL2T:
166 case ARMV8_64_EL2H:
167 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
168 break;
169 case ARMV8_64_EL3H:
170 case ARMV8_64_EL3T:
171 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
172 break;
173
174 case ARM_MODE_SVC:
175 case ARM_MODE_ABT:
176 case ARM_MODE_FIQ:
177 case ARM_MODE_IRQ:
178 case ARM_MODE_HYP:
179 case ARM_MODE_SYS:
180 instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
181 break;
182
183 default:
184 LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_mode);
185 break;
186 }
187
188 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
189 aarch64->system_control_reg_curr);
190 return retval;
191 }
192
193 /*
194 * Basic debug access, very low level assumes state is saved
195 */
/*
 * Basic debug access, very low level assumes state is saved
 *
 * Unlocks the OS lock, clears sticky power-down status so the core
 * power domain registers become accessible, then programs the static
 * CTI routing used by this driver:
 *   channel 0 -> HALT request to the PE (CTI_OUTEN0)
 *   channel 1 -> Restart request to the PE (CTI_OUTEN1)
 * with all channels gated from the CTM by default.
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG("%s", target_name(target));

	/* clear the OS lock so debug register access is permitted */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = arm_cti_enable(armv8->cti, true);
	/* By default, gate all channel events to and from the CTM */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* output halt requests to PE on channel 0 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	return ERROR_OK;
}
243
244 /* Write to memory mapped registers directly with no cache or mmu handling */
245 static int aarch64_dap_write_memap_register_u32(struct target *target,
246 uint32_t address,
247 uint32_t value)
248 {
249 int retval;
250 struct armv8_common *armv8 = target_to_armv8(target);
251
252 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
253
254 return retval;
255 }
256
257 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
258 {
259 struct arm_dpm *dpm = &a8->armv8_common.dpm;
260 int retval;
261
262 dpm->arm = &a8->armv8_common.arm;
263 dpm->didr = debug;
264
265 retval = armv8_dpm_setup(dpm);
266 if (retval == ERROR_OK)
267 retval = armv8_dpm_initialize(dpm);
268
269 return retval;
270 }
271
272 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
273 {
274 struct armv8_common *armv8 = target_to_armv8(target);
275 return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
276 }
277
278 static int aarch64_check_state_one(struct target *target,
279 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
280 {
281 struct armv8_common *armv8 = target_to_armv8(target);
282 uint32_t prsr;
283 int retval;
284
285 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
286 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
287 if (retval != ERROR_OK)
288 return retval;
289
290 if (p_prsr)
291 *p_prsr = prsr;
292
293 if (p_result)
294 *p_result = (prsr & mask) == (val & mask);
295
296 return ERROR_OK;
297 }
298
299 static int aarch64_wait_halt_one(struct target *target)
300 {
301 int retval = ERROR_OK;
302 uint32_t prsr;
303
304 int64_t then = timeval_ms();
305 for (;;) {
306 int halted;
307
308 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
309 if (retval != ERROR_OK || halted)
310 break;
311
312 if (timeval_ms() > then + 1000) {
313 retval = ERROR_TARGET_TIMEOUT;
314 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
315 break;
316 }
317 }
318 return retval;
319 }
320
/*
 * Prepare every running, examined PE of the SMP group for halting by
 * opening its CTI gate on channel 0 (so halt requests propagate through
 * the CTM) and enabling Halting Debug Mode in DSCR.
 *
 * @param exc_target skip the calling target itself
 * @param p_first    if non-NULL, receives the first prepared sibling
 *                   (when exc_target and one exists), else @p target
 */
static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head = target->head;
	struct target *first = NULL;

	LOG_DEBUG("target %s exc %i", target_name(target), exc_target);

	while (head != NULL) {
		struct target *curr = head->target;
		struct armv8_common *armv8 = target_to_armv8(curr);
		head = head->next;

		if (exc_target && curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_RUNNING)
			continue;

		/* HACK: mark this target as prepared for halting */
		curr->debug_reason = DBG_REASON_DBGRQ;

		/* open the gate for channel 0 to let HALT requests pass to the CTM */
		retval = arm_cti_ungate_channel(armv8->cti, 0);
		if (retval == ERROR_OK)
			retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
		if (retval != ERROR_OK)
			break;

		LOG_DEBUG("target %s prepared", target_name(curr));

		if (first == NULL)
			first = curr;
	}

	if (p_first) {
		if (exc_target && first)
			*p_first = first;
		else
			*p_first = target;
	}

	return retval;
}
366
367 static int aarch64_halt_one(struct target *target, enum halt_mode mode)
368 {
369 int retval = ERROR_OK;
370 struct armv8_common *armv8 = target_to_armv8(target);
371
372 LOG_DEBUG("%s", target_name(target));
373
374 /* allow Halting Debug Mode */
375 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
376 if (retval != ERROR_OK)
377 return retval;
378
379 /* trigger an event on channel 0, this outputs a halt request to the PE */
380 retval = arm_cti_pulse_channel(armv8->cti, 0);
381 if (retval != ERROR_OK)
382 return retval;
383
384 if (mode == HALT_SYNC) {
385 retval = aarch64_wait_halt_one(target);
386 if (retval != ERROR_OK) {
387 if (retval == ERROR_TARGET_TIMEOUT)
388 LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
389 return retval;
390 }
391 }
392
393 return ERROR_OK;
394 }
395
/*
 * Halt all PEs of the SMP group.
 *
 * Prepares every PE, halts one of them (the halt then propagates via
 * the CTM to the others), and waits up to one second for all examined
 * PEs to report halted.
 *
 * @param exc_target exclude the calling target from preparation; if no
 *                   other PE needed preparing, return immediately.
 */
static int aarch64_halt_smp(struct target *target, bool exc_target)
{
	struct target *next = target;
	int retval;

	/* prepare halt on all PEs of the group */
	retval = aarch64_prepare_halt_smp(target, exc_target, &next);

	if (exc_target && next == target)
		return retval;

	/* halt the target PE */
	if (retval == ERROR_OK)
		retval = aarch64_halt_one(next, HALT_LAZY);

	if (retval != ERROR_OK)
		return retval;

	/* wait for all PEs to halt */
	int64_t then = timeval_ms();
	for (;;) {
		bool all_halted = true;
		struct target_list *head;
		struct target *curr;

		foreach_smp_target(head, target->head) {
			int halted;

			curr = head->target;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
			if (retval != ERROR_OK || !halted) {
				all_halted = false;
				/* NOTE: curr intentionally keeps pointing at the
				 * non-halted PE; the hack below uses it */
				break;
			}
		}

		if (all_halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}

		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit halt for the second cluster.
		 */
		retval = aarch64_halt_one(curr, HALT_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
458
/*
 * After one PE of an SMP group halted, bring the bookkeeping of the
 * whole group up to date.
 *
 * If the halt was not debugger-requested (DBG_REASON_NOTHALTED), first
 * force-halt the remaining PEs.  Then poll each sibling so its state
 * and debug reason get refreshed; the target currently serving GDB is
 * polled last so GDB sees a consistent group state.
 */
static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;

	if (debug_reason == DBG_REASON_NOTHALTED) {
		LOG_DEBUG("Halting remaining targets in SMP group");
		aarch64_halt_smp(target, true);
	}

	/* poll all targets in the group, but skip the target that serves GDB */
	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* remember the gdb_service->target */
		if (curr->gdb_service != NULL)
			gdb_target = curr->gdb_service->target;
		/* skip it */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in aarch64_poll() */
		curr->smp = 0;
		aarch64_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target != NULL && gdb_target != target)
		aarch64_poll(gdb_target);

	return ERROR_OK;
}
500
501 /*
502 * Aarch64 Run control
503 */
504
/*
 * Poll the PE's halt status via PRSR and update target state.
 *
 * On a freshly detected halt this performs debug entry (register cache
 * refresh etc.), updates the SMP group if applicable, lets semihosting
 * intercept the event, and finally fires HALTED/DEBUG_HALTED callbacks
 * depending on the previous state.
 */
static int aarch64_poll(struct target *target)
{
	enum target_state prev_target_state;
	int retval = ERROR_OK;
	int halted;

	retval = aarch64_check_state_one(target,
				PRSR_HALT, PRSR_HALT, &halted, NULL);
	if (retval != ERROR_OK)
		return retval;

	if (halted) {
		prev_target_state = target->state;
		if (prev_target_state != TARGET_HALTED) {
			/* save the reason before debug entry overwrites it */
			enum target_debug_reason debug_reason = target->debug_reason;

			/* We have a halting debug event */
			target->state = TARGET_HALTED;
			LOG_DEBUG("Target %s halted", target_name(target));
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp)
				update_halt_gdb(target, debug_reason);

			/* semihosting may consume the halt and resume the target */
			if (arm_semihosting(target, &retval) != 0)
				return retval;

			switch (prev_target_state) {
			case TARGET_RUNNING:
			case TARGET_UNKNOWN:
			case TARGET_RESET:
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
				break;
			case TARGET_DEBUG_RUNNING:
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
				break;
			default:
				break;
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
552
553 static int aarch64_halt(struct target *target)
554 {
555 struct armv8_common *armv8 = target_to_armv8(target);
556 armv8->last_run_control_op = ARMV8_RUNCONTROL_HALT;
557
558 if (target->smp)
559 return aarch64_halt_smp(target, false);
560
561 return aarch64_halt_one(target, HALT_SYNC);
562 }
563
/*
 * Restore a single target's context in preparation for restart.
 *
 * @param current    1: resume at current pc (reported via *address),
 *                   0: resume at *address
 * @param address    in/out resume address, see @p current
 * @param handle_breakpoints forwarded to aarch64_restore_context()
 * @param debug_execution    if 0, working areas are freed first
 *
 * Fixes up the resume PC per core state, writes it to the register
 * cache, restores SCTLR (uses r0, hence before the context restore)
 * and finally flushes the dirty register cache back to the core.
 */
static int aarch64_restore_one(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	LOG_DEBUG("%s", target_name(target));

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = true;
	arm->pc->valid = true;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
618
619 /**
620 * prepare single target for restart
621 *
622 *
623 */
/**
 * prepare single target for restart
 *
 * Sanity-checks DSCR (ITE set, ERR clear), acknowledges any pending CTI
 * halt event, opens the CTI gate for restart events (channel 1) while
 * closing it for halt events (channel 0), makes sure DSCR.HDE stays
 * set, and clears the sticky PRSR bits so PRSR.SDR starts out 0 and can
 * later indicate that the restart actually happened.
 */
static int aarch64_prepare_restart_one(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dscr;
	uint32_t tmp;

	LOG_DEBUG("%s", target_name(target));

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* acknowledge a pending CTI halt event */
	retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs. Also close gate for channel 0
	 * to isolate the PE from halt events.
	 */
	if (retval == ERROR_OK)
		retval = arm_cti_ungate_channel(armv8->cti, 1);
	if (retval == ERROR_OK)
		retval = arm_cti_gate_channel(armv8->cti, 0);

	/* make sure that DSCR.HDE is set */
	if (retval == ERROR_OK) {
		dscr |= DSCR_HDE;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval == ERROR_OK) {
		/* clear sticky bits in PRSR, SDR is now 0 */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
	}

	return retval;
}
670
671 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
672 {
673 struct armv8_common *armv8 = target_to_armv8(target);
674 int retval;
675
676 LOG_DEBUG("%s", target_name(target));
677
678 /* trigger an event on channel 1, generates a restart request to the PE */
679 retval = arm_cti_pulse_channel(armv8->cti, 1);
680 if (retval != ERROR_OK)
681 return retval;
682
683 if (mode == RESTART_SYNC) {
684 int64_t then = timeval_ms();
685 for (;;) {
686 int resumed;
687 /*
688 * if PRSR.SDR is set now, the target did restart, even
689 * if it's now already halted again (e.g. due to breakpoint)
690 */
691 retval = aarch64_check_state_one(target,
692 PRSR_SDR, PRSR_SDR, &resumed, NULL);
693 if (retval != ERROR_OK || resumed)
694 break;
695
696 if (timeval_ms() > then + 1000) {
697 LOG_ERROR("%s: Timeout waiting for resume"PRIx32, target_name(target));
698 retval = ERROR_TARGET_TIMEOUT;
699 break;
700 }
701 }
702 }
703
704 if (retval != ERROR_OK)
705 return retval;
706
707 target->debug_reason = DBG_REASON_NOTHALTED;
708 target->state = TARGET_RUNNING;
709
710 return ERROR_OK;
711 }
712
713 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
714 {
715 int retval;
716
717 LOG_DEBUG("%s", target_name(target));
718
719 retval = aarch64_prepare_restart_one(target);
720 if (retval == ERROR_OK)
721 retval = aarch64_do_restart_one(target, mode);
722
723 return retval;
724 }
725
726 /*
727 * prepare all but the current target for restart
728 */
/*
 * prepare all but the current target for restart
 *
 * For every halted, examined sibling of the SMP group: restore its
 * register context (resuming at its current pc, not stepping) and run
 * the per-PE restart preparation.
 *
 * @param p_first if non-NULL, receives the first sibling that was
 *                prepared (NULL when there was none)
 */
static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;
	uint64_t address;

	foreach_smp_target(head, target->head) {
		struct target *curr = head->target;

		/* skip calling target */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_HALTED)
			continue;

		/* resume at current address, not in step mode */
		retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
		if (retval == ERROR_OK)
			retval = aarch64_prepare_restart_one(curr);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to restore target %s", target_name(curr));
			break;
		}
		/* remember the first valid target in the group */
		if (first == NULL)
			first = curr;
	}

	if (p_first)
		*p_first = first;

	return retval;
}
765
766
/*
 * Restart all SMP siblings of @p target (used before single-stepping
 * the current PE so the rest of the group keeps running).
 *
 * Prepares all siblings, restarts the first one (the restart event then
 * propagates through the CTM) and waits up to one second for every
 * sibling to leave debug state, firing RESUMED callbacks as each one
 * comes up.
 */
static int aarch64_step_restart_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prep_restart_smp(target, 0, &first);
	if (retval != ERROR_OK)
		return retval;

	if (first != NULL)
		retval = aarch64_do_restart_one(first, RESTART_LAZY);
	if (retval != ERROR_OK) {
		LOG_DEBUG("error restarting target %s", target_name(first));
		return retval;
	}

	int64_t then = timeval_ms();
	for (;;) {
		struct target *curr = target;
		bool all_resumed = true;

		foreach_smp_target(head, target->head) {
			uint32_t prsr;
			int resumed;

			curr = head->target;

			if (curr == target)
				continue;

			if (!target_was_examined(curr))
				continue;

			/* resumed means SDR set; a PE that is neither resumed nor
			 * halted may be powered down, don't wait for it */
			retval = aarch64_check_state_one(curr,
					PRSR_SDR, PRSR_SDR, &resumed, &prsr);
			if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
				all_resumed = false;
				break;
			}

			if (curr->state != TARGET_RUNNING) {
				curr->state = TARGET_RUNNING;
				curr->debug_reason = DBG_REASON_NOTHALTED;
				target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
			}
		}

		if (all_resumed)
			break;

		if (timeval_ms() > then + 1000) {
			LOG_ERROR("%s: timeout waiting for target resume", __func__);
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to resume one core in
		 * each cluster explicitly. So if we find that a core has not
		 * resumed yet, we trigger an explicit resume for the second
		 * cluster.
		 */
		retval = aarch64_do_restart_one(curr, RESTART_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
839
/*
 * target_type resume handler.
 *
 * @param current            1: resume at current pc, 0: resume at @p address
 * @param address            resume address when @p current is 0
 * @param handle_breakpoints forwarded to the context-restore path
 * @param debug_execution    non-zero: enter TARGET_DEBUG_RUNNING and fire
 *                           DEBUG_RESUMED instead of RESUMED
 *
 * For an SMP target, first prepares all siblings, then restores and
 * synchronously restarts the current PE (the restart propagates through
 * the CTM), then waits up to one second for the siblings to resume.
 */
static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	struct armv8_common *armv8 = target_to_armv8(target);
	armv8->last_run_control_op = ARMV8_RUNCONTROL_RESUME;

	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	/*
	 * If this target is part of a SMP group, prepare the others
	 * targets for resuming. This involves restoring the complete
	 * target register context and setting up CTI gates to accept
	 * resume events from the trigger matrix.
	 */
	if (target->smp) {
		retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
		if (retval != ERROR_OK)
			return retval;
	}

	/* all targets prepared, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
				 debug_execution);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_SYNC);
	if (retval != ERROR_OK)
		return retval;

	if (target->smp) {
		int64_t then = timeval_ms();
		for (;;) {
			struct target *curr = target;
			struct target_list *head;
			bool all_resumed = true;

			foreach_smp_target(head, target->head) {
				uint32_t prsr;
				int resumed;

				curr = head->target;
				if (curr == target)
					continue;
				if (!target_was_examined(curr))
					continue;

				retval = aarch64_check_state_one(curr,
						PRSR_SDR, PRSR_SDR, &resumed, &prsr);
				if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
					all_resumed = false;
					break;
				}

				if (curr->state != TARGET_RUNNING) {
					curr->state = TARGET_RUNNING;
					curr->debug_reason = DBG_REASON_NOTHALTED;
					target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
				}
			}

			if (all_resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}

			/*
			 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
			 * and it looks like the CTI's are not connected by a common
			 * trigger matrix. It seems that we need to halt one core in each
			 * cluster explicitly. So if we find that a core has not halted
			 * yet, we trigger an explicit resume for the second cluster.
			 */
			retval = aarch64_do_restart_one(curr, RESTART_LAZY);
			if (retval != ERROR_OK)
				break;
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}
942
/*
 * Perform debug entry after a halt was detected.
 *
 * Clears sticky errors, reads DSCR to determine core state, switches
 * the opcode/register-access tables to AArch64 or AArch32 accordingly,
 * closes the CTI gate, determines the debug reason from DSCR, reads
 * WFAR for watchpoint hits, and finally reads the current register set
 * and runs the post_debug_entry hook.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	uint32_t dscr;

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval == ERROR_OK)
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);

	dpm->dscr = dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* close the CTI gate for all events */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		/* WFAR is 64 bit wide, read high word first */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR1,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
1010
/*
 * Post-debug-entry hook: snapshot SCTLR and derive the cached MMU and
 * cache enable flags from it.
 *
 * Reads SCTLR_ELx (AArch64) or cp15 c1 (AArch32) for the mode the core
 * halted in, caches it in aarch64->system_control_reg(_curr), and on
 * the first entry also identifies the cache geometry and MPIDR.
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* SCTLR_EL1 is not readable from EL0; switch to EL1h first */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_SYS:
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
				armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* identify cache geometry and MPIDR only once */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	/* SCTLR.M, SCTLR.C, SCTLR.I respectively */
	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
		(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
		(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	return ERROR_OK;
}
1077
1078 /*
1079 * single-step a target
1080 */
1081 static int aarch64_step(struct target *target, int current, target_addr_t address,
1082 int handle_breakpoints)
1083 {
1084 struct armv8_common *armv8 = target_to_armv8(target);
1085 struct aarch64_common *aarch64 = target_to_aarch64(target);
1086 int saved_retval = ERROR_OK;
1087 int retval;
1088 uint32_t edecr;
1089
1090 armv8->last_run_control_op = ARMV8_RUNCONTROL_STEP;
1091
1092 if (target->state != TARGET_HALTED) {
1093 LOG_WARNING("target not halted");
1094 return ERROR_TARGET_NOT_HALTED;
1095 }
1096
1097 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1098 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1099 /* make sure EDECR.SS is not set when restoring the register */
1100
1101 if (retval == ERROR_OK) {
1102 edecr &= ~0x4;
1103 /* set EDECR.SS to enter hardware step mode */
1104 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1105 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1106 }
1107 /* disable interrupts while stepping */
1108 if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
1109 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1110 /* bail out if stepping setup has failed */
1111 if (retval != ERROR_OK)
1112 return retval;
1113
1114 if (target->smp && (current == 1)) {
1115 /*
1116 * isolate current target so that it doesn't get resumed
1117 * together with the others
1118 */
1119 retval = arm_cti_gate_channel(armv8->cti, 1);
1120 /* resume all other targets in the group */
1121 if (retval == ERROR_OK)
1122 retval = aarch64_step_restart_smp(target);
1123 if (retval != ERROR_OK) {
1124 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1125 return retval;
1126 }
1127 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1128 }
1129
1130 /* all other targets running, restore and restart the current target */
1131 retval = aarch64_restore_one(target, current, &address, 0, 0);
1132 if (retval == ERROR_OK)
1133 retval = aarch64_restart_one(target, RESTART_LAZY);
1134
1135 if (retval != ERROR_OK)
1136 return retval;
1137
1138 LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
1139 if (!handle_breakpoints)
1140 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1141
1142 int64_t then = timeval_ms();
1143 for (;;) {
1144 int stepped;
1145 uint32_t prsr;
1146
1147 retval = aarch64_check_state_one(target,
1148 PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
1149 if (retval != ERROR_OK || stepped)
1150 break;
1151
1152 if (timeval_ms() > then + 100) {
1153 LOG_ERROR("timeout waiting for target %s halt after step",
1154 target_name(target));
1155 retval = ERROR_TARGET_TIMEOUT;
1156 break;
1157 }
1158 }
1159
1160 /*
1161 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1162 * causes a timeout. The core takes the step but doesn't complete it and so
1163 * debug state is never entered. However, you can manually halt the core
1164 * as an external debug even is also a WFI wakeup event.
1165 */
1166 if (retval == ERROR_TARGET_TIMEOUT)
1167 saved_retval = aarch64_halt_one(target, HALT_SYNC);
1168
1169 /* restore EDECR */
1170 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1171 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1172 if (retval != ERROR_OK)
1173 return retval;
1174
1175 /* restore interrupts */
1176 if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
1177 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1178 if (retval != ERROR_OK)
1179 return ERROR_OK;
1180 }
1181
1182 if (saved_retval != ERROR_OK)
1183 return saved_retval;
1184
1185 return ERROR_OK;
1186 }
1187
1188 static int aarch64_restore_context(struct target *target, bool bpwp)
1189 {
1190 struct armv8_common *armv8 = target_to_armv8(target);
1191 struct arm *arm = &armv8->arm;
1192
1193 int retval;
1194
1195 LOG_DEBUG("%s", target_name(target));
1196
1197 if (armv8->pre_restore_context)
1198 armv8->pre_restore_context(target);
1199
1200 retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1201 if (retval == ERROR_OK) {
1202 /* registers are now invalid */
1203 register_cache_invalidate(arm->core_cache);
1204 register_cache_invalidate(arm->core_cache->next);
1205 }
1206
1207 return retval;
1208 }
1209
1210 /*
1211 * Cortex-A8 Breakpoint and watchpoint functions
1212 */
1213
1214 /* Setup hardware Breakpoint Register Pair */
1215 static int aarch64_set_breakpoint(struct target *target,
1216 struct breakpoint *breakpoint, uint8_t matchmode)
1217 {
1218 int retval;
1219 int brp_i = 0;
1220 uint32_t control;
1221 uint8_t byte_addr_select = 0x0F;
1222 struct aarch64_common *aarch64 = target_to_aarch64(target);
1223 struct armv8_common *armv8 = &aarch64->armv8_common;
1224 struct aarch64_brp *brp_list = aarch64->brp_list;
1225
1226 if (breakpoint->set) {
1227 LOG_WARNING("breakpoint already set");
1228 return ERROR_OK;
1229 }
1230
1231 if (breakpoint->type == BKPT_HARD) {
1232 int64_t bpt_value;
1233 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
1234 brp_i++;
1235 if (brp_i >= aarch64->brp_num) {
1236 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1237 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1238 }
1239 breakpoint->set = brp_i + 1;
1240 if (breakpoint->length == 2)
1241 byte_addr_select = (3 << (breakpoint->address & 0x02));
1242 control = ((matchmode & 0x7) << 20)
1243 | (1 << 13)
1244 | (byte_addr_select << 5)
1245 | (3 << 1) | 1;
1246 brp_list[brp_i].used = 1;
1247 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1248 brp_list[brp_i].control = control;
1249 bpt_value = brp_list[brp_i].value;
1250
1251 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1252 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1253 (uint32_t)(bpt_value & 0xFFFFFFFF));
1254 if (retval != ERROR_OK)
1255 return retval;
1256 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1257 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1258 (uint32_t)(bpt_value >> 32));
1259 if (retval != ERROR_OK)
1260 return retval;
1261
1262 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1263 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1264 brp_list[brp_i].control);
1265 if (retval != ERROR_OK)
1266 return retval;
1267 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1268 brp_list[brp_i].control,
1269 brp_list[brp_i].value);
1270
1271 } else if (breakpoint->type == BKPT_SOFT) {
1272 uint32_t opcode;
1273 uint8_t code[4];
1274
1275 if (armv8_dpm_get_core_state(&armv8->dpm) == ARM_STATE_AARCH64) {
1276 opcode = ARMV8_HLT(11);
1277
1278 if (breakpoint->length != 4)
1279 LOG_ERROR("bug: breakpoint length should be 4 in AArch64 mode");
1280 } else {
1281 /**
1282 * core_state is ARM_STATE_ARM
1283 * in that case the opcode depends on breakpoint length:
1284 * - if length == 4 => A32 opcode
1285 * - if length == 2 => T32 opcode
1286 * - if length == 3 => T32 opcode (refer to gdb doc : ARM-Breakpoint-Kinds)
1287 * in that case the length should be changed from 3 to 4 bytes
1288 **/
1289 opcode = (breakpoint->length == 4) ? ARMV8_HLT_A1(11) :
1290 (uint32_t) (ARMV8_HLT_T1(11) | ARMV8_HLT_T1(11) << 16);
1291
1292 if (breakpoint->length == 3)
1293 breakpoint->length = 4;
1294 }
1295
1296 buf_set_u32(code, 0, 32, opcode);
1297
1298 retval = target_read_memory(target,
1299 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1300 breakpoint->length, 1,
1301 breakpoint->orig_instr);
1302 if (retval != ERROR_OK)
1303 return retval;
1304
1305 armv8_cache_d_inner_flush_virt(armv8,
1306 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1307 breakpoint->length);
1308
1309 retval = target_write_memory(target,
1310 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1311 breakpoint->length, 1, code);
1312 if (retval != ERROR_OK)
1313 return retval;
1314
1315 armv8_cache_d_inner_flush_virt(armv8,
1316 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1317 breakpoint->length);
1318
1319 armv8_cache_i_inner_inval_virt(armv8,
1320 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1321 breakpoint->length);
1322
1323 breakpoint->set = 0x11; /* Any nice value but 0 */
1324 }
1325
1326 /* Ensure that halting debug mode is enable */
1327 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1328 if (retval != ERROR_OK) {
1329 LOG_DEBUG("Failed to set DSCR.HDE");
1330 return retval;
1331 }
1332
1333 return ERROR_OK;
1334 }
1335
1336 static int aarch64_set_context_breakpoint(struct target *target,
1337 struct breakpoint *breakpoint, uint8_t matchmode)
1338 {
1339 int retval = ERROR_FAIL;
1340 int brp_i = 0;
1341 uint32_t control;
1342 uint8_t byte_addr_select = 0x0F;
1343 struct aarch64_common *aarch64 = target_to_aarch64(target);
1344 struct armv8_common *armv8 = &aarch64->armv8_common;
1345 struct aarch64_brp *brp_list = aarch64->brp_list;
1346
1347 if (breakpoint->set) {
1348 LOG_WARNING("breakpoint already set");
1349 return retval;
1350 }
1351 /*check available context BRPs*/
1352 while ((brp_list[brp_i].used ||
1353 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
1354 brp_i++;
1355
1356 if (brp_i >= aarch64->brp_num) {
1357 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1358 return ERROR_FAIL;
1359 }
1360
1361 breakpoint->set = brp_i + 1;
1362 control = ((matchmode & 0x7) << 20)
1363 | (1 << 13)
1364 | (byte_addr_select << 5)
1365 | (3 << 1) | 1;
1366 brp_list[brp_i].used = 1;
1367 brp_list[brp_i].value = (breakpoint->asid);
1368 brp_list[brp_i].control = control;
1369 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1370 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1371 brp_list[brp_i].value);
1372 if (retval != ERROR_OK)
1373 return retval;
1374 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1375 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1376 brp_list[brp_i].control);
1377 if (retval != ERROR_OK)
1378 return retval;
1379 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1380 brp_list[brp_i].control,
1381 brp_list[brp_i].value);
1382 return ERROR_OK;
1383
1384 }
1385
1386 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1387 {
1388 int retval = ERROR_FAIL;
1389 int brp_1 = 0; /* holds the contextID pair */
1390 int brp_2 = 0; /* holds the IVA pair */
1391 uint32_t control_CTX, control_IVA;
1392 uint8_t CTX_byte_addr_select = 0x0F;
1393 uint8_t IVA_byte_addr_select = 0x0F;
1394 uint8_t CTX_machmode = 0x03;
1395 uint8_t IVA_machmode = 0x01;
1396 struct aarch64_common *aarch64 = target_to_aarch64(target);
1397 struct armv8_common *armv8 = &aarch64->armv8_common;
1398 struct aarch64_brp *brp_list = aarch64->brp_list;
1399
1400 if (breakpoint->set) {
1401 LOG_WARNING("breakpoint already set");
1402 return retval;
1403 }
1404 /*check available context BRPs*/
1405 while ((brp_list[brp_1].used ||
1406 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1407 brp_1++;
1408
1409 printf("brp(CTX) found num: %d\n", brp_1);
1410 if (brp_1 >= aarch64->brp_num) {
1411 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1412 return ERROR_FAIL;
1413 }
1414
1415 while ((brp_list[brp_2].used ||
1416 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1417 brp_2++;
1418
1419 printf("brp(IVA) found num: %d\n", brp_2);
1420 if (brp_2 >= aarch64->brp_num) {
1421 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1422 return ERROR_FAIL;
1423 }
1424
1425 breakpoint->set = brp_1 + 1;
1426 breakpoint->linked_BRP = brp_2;
1427 control_CTX = ((CTX_machmode & 0x7) << 20)
1428 | (brp_2 << 16)
1429 | (0 << 14)
1430 | (CTX_byte_addr_select << 5)
1431 | (3 << 1) | 1;
1432 brp_list[brp_1].used = 1;
1433 brp_list[brp_1].value = (breakpoint->asid);
1434 brp_list[brp_1].control = control_CTX;
1435 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1436 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1437 brp_list[brp_1].value);
1438 if (retval != ERROR_OK)
1439 return retval;
1440 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1441 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1442 brp_list[brp_1].control);
1443 if (retval != ERROR_OK)
1444 return retval;
1445
1446 control_IVA = ((IVA_machmode & 0x7) << 20)
1447 | (brp_1 << 16)
1448 | (1 << 13)
1449 | (IVA_byte_addr_select << 5)
1450 | (3 << 1) | 1;
1451 brp_list[brp_2].used = 1;
1452 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1453 brp_list[brp_2].control = control_IVA;
1454 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1455 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1456 brp_list[brp_2].value & 0xFFFFFFFF);
1457 if (retval != ERROR_OK)
1458 return retval;
1459 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1460 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1461 brp_list[brp_2].value >> 32);
1462 if (retval != ERROR_OK)
1463 return retval;
1464 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1465 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1466 brp_list[brp_2].control);
1467 if (retval != ERROR_OK)
1468 return retval;
1469
1470 return ERROR_OK;
1471 }
1472
/*
 * Remove a previously-set breakpoint.
 *
 * For BKPT_HARD with both address and asid non-zero this is a hybrid
 * breakpoint: both the context BRP (breakpoint->set - 1) and the linked
 * IVA BRP (breakpoint->linked_BRP) are cleared. For a plain hard
 * breakpoint only one BRP is cleared. For BKPT_SOFT the original
 * instruction bytes are written back and the caches made coherent.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			/* hybrid breakpoint: tear down both linked BRPs */
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* disable via BCR first, then clear both BVR words
			 * (value is already 0, so both writes clear the register) */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: clear the single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* flush D-cache and invalidate I-cache so the core
		 * fetches the restored instruction, not the stale HLT */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1605
1606 static int aarch64_add_breakpoint(struct target *target,
1607 struct breakpoint *breakpoint)
1608 {
1609 struct aarch64_common *aarch64 = target_to_aarch64(target);
1610
1611 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1612 LOG_INFO("no hardware breakpoint available");
1613 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1614 }
1615
1616 if (breakpoint->type == BKPT_HARD)
1617 aarch64->brp_num_available--;
1618
1619 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1620 }
1621
1622 static int aarch64_add_context_breakpoint(struct target *target,
1623 struct breakpoint *breakpoint)
1624 {
1625 struct aarch64_common *aarch64 = target_to_aarch64(target);
1626
1627 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1628 LOG_INFO("no hardware breakpoint available");
1629 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1630 }
1631
1632 if (breakpoint->type == BKPT_HARD)
1633 aarch64->brp_num_available--;
1634
1635 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1636 }
1637
1638 static int aarch64_add_hybrid_breakpoint(struct target *target,
1639 struct breakpoint *breakpoint)
1640 {
1641 struct aarch64_common *aarch64 = target_to_aarch64(target);
1642
1643 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1644 LOG_INFO("no hardware breakpoint available");
1645 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1646 }
1647
1648 if (breakpoint->type == BKPT_HARD)
1649 aarch64->brp_num_available--;
1650
1651 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1652 }
1653
1654
1655 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1656 {
1657 struct aarch64_common *aarch64 = target_to_aarch64(target);
1658
1659 #if 0
1660 /* It is perfectly possible to remove breakpoints while the target is running */
1661 if (target->state != TARGET_HALTED) {
1662 LOG_WARNING("target not halted");
1663 return ERROR_TARGET_NOT_HALTED;
1664 }
1665 #endif
1666
1667 if (breakpoint->set) {
1668 aarch64_unset_breakpoint(target, breakpoint);
1669 if (breakpoint->type == BKPT_HARD)
1670 aarch64->brp_num_available++;
1671 }
1672
1673 return ERROR_OK;
1674 }
1675
1676 /*
1677 * Cortex-A8 Reset functions
1678 */
1679
1680 static int aarch64_enable_reset_catch(struct target *target, bool enable)
1681 {
1682 struct armv8_common *armv8 = target_to_armv8(target);
1683 uint32_t edecr;
1684 int retval;
1685
1686 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1687 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1688 LOG_DEBUG("EDECR = 0x%08" PRIx32 ", enable=%d", edecr, enable);
1689 if (retval != ERROR_OK)
1690 return retval;
1691
1692 if (enable)
1693 edecr |= ECR_RCE;
1694 else
1695 edecr &= ~ECR_RCE;
1696
1697 return mem_ap_write_atomic_u32(armv8->debug_ap,
1698 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1699 }
1700
1701 static int aarch64_clear_reset_catch(struct target *target)
1702 {
1703 struct armv8_common *armv8 = target_to_armv8(target);
1704 uint32_t edesr;
1705 int retval;
1706 bool was_triggered;
1707
1708 /* check if Reset Catch debug event triggered as expected */
1709 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1710 armv8->debug_base + CPUV8_DBG_EDESR, &edesr);
1711 if (retval != ERROR_OK)
1712 return retval;
1713
1714 was_triggered = !!(edesr & ESR_RC);
1715 LOG_DEBUG("Reset Catch debug event %s",
1716 was_triggered ? "triggered" : "NOT triggered!");
1717
1718 if (was_triggered) {
1719 /* clear pending Reset Catch debug event */
1720 edesr &= ~ESR_RC;
1721 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1722 armv8->debug_base + CPUV8_DBG_EDESR, edesr);
1723 if (retval != ERROR_OK)
1724 return retval;
1725 }
1726
1727 return ERROR_OK;
1728 }
1729
/*
 * Assert reset on the target.
 *
 * Prefers a user-supplied TARGET_EVENT_RESET_ASSERT handler; otherwise
 * uses SRST. When a halt after reset is requested ("reset halt"), a
 * Reset Catch debug event is armed before (or, with SRST gating, after
 * asserting) SRST so the core halts right out of reset.
 */
static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	enum reset_types reset_config = jtag_get_reset_config();
	int retval;

	LOG_DEBUG(" ");

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (reset_config & RESET_HAS_SRST) {
		bool srst_asserted = false;

		if (target->reset_halt) {
			if (target_was_examined(target)) {

				if (reset_config & RESET_SRST_NO_GATING) {
					/*
					 * SRST needs to be asserted *before* Reset Catch
					 * debug event can be set up.
					 */
					adapter_assert_reset();
					srst_asserted = true;

					/* make sure to clear all sticky errors */
					/* best-effort: the return value is deliberately ignored */
					mem_ap_write_atomic_u32(armv8->debug_ap,
							armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
				}

				/* set up Reset Catch debug event to halt the CPU after reset */
				retval = aarch64_enable_reset_catch(target, true);
				if (retval != ERROR_OK)
					LOG_WARNING("%s: Error enabling Reset Catch debug event; the CPU will not halt immediately after reset!",
							target_name(target));
			} else {
				LOG_WARNING("%s: Target not examined, will not halt immediately after reset!",
						target_name(target));
			}
		}

		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		if (!srst_asserted)
			adapter_assert_reset();
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target)) {
		register_cache_invalidate(armv8->arm.core_cache);
		register_cache_invalidate(armv8->arm.core_cache->next);
	}

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1791
1792 static int aarch64_deassert_reset(struct target *target)
1793 {
1794 int retval;
1795
1796 LOG_DEBUG(" ");
1797
1798 /* be certain SRST is off */
1799 adapter_deassert_reset();
1800
1801 if (!target_was_examined(target))
1802 return ERROR_OK;
1803
1804 retval = aarch64_init_debug_access(target);
1805 if (retval != ERROR_OK)
1806 return retval;
1807
1808 retval = aarch64_poll(target);
1809 if (retval != ERROR_OK)
1810 return retval;
1811
1812 if (target->reset_halt) {
1813 /* clear pending Reset Catch debug event */
1814 retval = aarch64_clear_reset_catch(target);
1815 if (retval != ERROR_OK)
1816 LOG_WARNING("%s: Clearing Reset Catch debug event failed",
1817 target_name(target));
1818
1819 /* disable Reset Catch debug event */
1820 retval = aarch64_enable_reset_catch(target, false);
1821 if (retval != ERROR_OK)
1822 LOG_WARNING("%s: Disabling Reset Catch debug event failed",
1823 target_name(target));
1824
1825 if (target->state != TARGET_HALTED) {
1826 LOG_WARNING("%s: ran after reset and before halt ...",
1827 target_name(target));
1828 retval = target_halt(target);
1829 if (retval != ERROR_OK)
1830 return retval;
1831 }
1832 }
1833
1834 return ERROR_OK;
1835 }
1836
1837 static int aarch64_write_cpu_memory_slow(struct target *target,
1838 uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1839 {
1840 struct armv8_common *armv8 = target_to_armv8(target);
1841 struct arm_dpm *dpm = &armv8->dpm;
1842 struct arm *arm = &armv8->arm;
1843 int retval;
1844
1845 armv8_reg_current(arm, 1)->dirty = true;
1846
1847 /* change DCC to normal mode if necessary */
1848 if (*dscr & DSCR_MA) {
1849 *dscr &= ~DSCR_MA;
1850 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1851 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1852 if (retval != ERROR_OK)
1853 return retval;
1854 }
1855
1856 while (count) {
1857 uint32_t data, opcode;
1858
1859 /* write the data to store into DTRRX */
1860 if (size == 1)
1861 data = *buffer;
1862 else if (size == 2)
1863 data = target_buffer_get_u16(target, buffer);
1864 else
1865 data = target_buffer_get_u32(target, buffer);
1866 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1867 armv8->debug_base + CPUV8_DBG_DTRRX, data);
1868 if (retval != ERROR_OK)
1869 return retval;
1870
1871 if (arm->core_state == ARM_STATE_AARCH64)
1872 retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
1873 else
1874 retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
1875 if (retval != ERROR_OK)
1876 return retval;
1877
1878 if (size == 1)
1879 opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
1880 else if (size == 2)
1881 opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
1882 else
1883 opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
1884 retval = dpm->instr_execute(dpm, opcode);
1885 if (retval != ERROR_OK)
1886 return retval;
1887
1888 /* Advance */
1889 buffer += size;
1890 --count;
1891 }
1892
1893 return ERROR_OK;
1894 }
1895
1896 static int aarch64_write_cpu_memory_fast(struct target *target,
1897 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1898 {
1899 struct armv8_common *armv8 = target_to_armv8(target);
1900 struct arm *arm = &armv8->arm;
1901 int retval;
1902
1903 armv8_reg_current(arm, 1)->dirty = true;
1904
1905 /* Step 1.d - Change DCC to memory mode */
1906 *dscr |= DSCR_MA;
1907 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1908 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1909 if (retval != ERROR_OK)
1910 return retval;
1911
1912
1913 /* Step 2.a - Do the write */
1914 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1915 buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
1916 if (retval != ERROR_OK)
1917 return retval;
1918
1919 /* Step 3.a - Switch DTR mode back to Normal mode */
1920 *dscr &= ~DSCR_MA;
1921 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1922 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1923 if (retval != ERROR_OK)
1924 return retval;
1925
1926 return ERROR_OK;
1927 }
1928
/*
 * Write target memory through the APB-AP using the core's DCC,
 * following the procedure in Arm ARM DDI0487A.g chapter J9.1:
 * load the start address into X0/R0, then use the fast (memory-mode,
 * word-aligned) or slow (per-unit) transfer path. Sticky DSCR abort
 * flags are checked afterwards and turned into an error.
 */
static int aarch64_write_cpu_memory(struct target *target,
	uint64_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* write memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRX */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* fast path needs word size and word alignment; otherwise fall
	 * back to the per-unit slow path */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);

	if (retval != ERROR_OK) {
		/* Unset DTR mode; best-effort cleanup, errors ignored */
		mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2015
2016 static int aarch64_read_cpu_memory_slow(struct target *target,
2017 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2018 {
2019 struct armv8_common *armv8 = target_to_armv8(target);
2020 struct arm_dpm *dpm = &armv8->dpm;
2021 struct arm *arm = &armv8->arm;
2022 int retval;
2023
2024 armv8_reg_current(arm, 1)->dirty = true;
2025
2026 /* change DCC to normal mode (if necessary) */
2027 if (*dscr & DSCR_MA) {
2028 *dscr &= DSCR_MA;
2029 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2030 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2031 if (retval != ERROR_OK)
2032 return retval;
2033 }
2034
2035 while (count) {
2036 uint32_t opcode, data;
2037
2038 if (size == 1)
2039 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
2040 else if (size == 2)
2041 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
2042 else
2043 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
2044 retval = dpm->instr_execute(dpm, opcode);
2045 if (retval != ERROR_OK)
2046 return retval;
2047
2048 if (arm->core_state == ARM_STATE_AARCH64)
2049 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
2050 else
2051 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
2052 if (retval != ERROR_OK)
2053 return retval;
2054
2055 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2056 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
2057 if (retval != ERROR_OK)
2058 return retval;
2059
2060 if (size == 1)
2061 *buffer = (uint8_t)data;
2062 else if (size == 2)
2063 target_buffer_set_u16(target, buffer, (uint16_t)data);
2064 else
2065 target_buffer_set_u32(target, buffer, data);
2066
2067 /* Advance */
2068 buffer += size;
2069 --count;
2070 }
2071
2072 return ERROR_OK;
2073 }
2074
/*
 * Word-aligned bulk read via DCC memory-access mode (DDI0487 J9.1):
 * while EDSCR.MA is set, every read of DTRTX re-issues the load and
 * post-increments X0, so count-1 words are streamed out, and the final
 * word is fetched after leaving memory mode to avoid over-reading.
 */
static int aarch64_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t value;

	/* Mark X1 as dirty */
	armv8_reg_current(arm, 1)->dirty = true;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
	} else {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
	}

	if (retval != ERROR_OK)
		return retval;

	/* Step 1.e - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 1.f - read DBGDTRTX and discard the value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	count--;
	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	if (count) {
		/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
		 * increments X0 by 4. */
		retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
									armv8->debug_base + CPUV8_DBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Step 3.a - set DTR access mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	/* store the last word after the streamed portion of the buffer */
	target_buffer_set_u32(target, buffer + count * 4, value);
	return retval;
}
2143
/*
 * Read target memory through the CPU core, using the APB-AP debug port
 * and the DCC (debug communication channel).  Implements the memory
 * read sequence from ARM DDI 0487A.g, chapter J9.1: the start address
 * is loaded into X0/R0, then repeated load + DTR transfers move the
 * data out.
 *
 * address: target virtual address (translated by the MMU as currently
 *          configured by the caller)
 * size:    access width in bytes (1, 2 or 4)
 * count:   number of elements of 'size' bytes
 * buffer:  destination, filled in target byte order
 *
 * Returns ERROR_OK, ERROR_TARGET_NOT_HALTED, or a lower-layer error.
 * If a sticky abort is flagged in EDSCR the exception is handed to
 * armv8_dpm_handle_exception() and ERROR_FAIL is returned.
 */
static int aarch64_read_cpu_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
			address, size, count);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* aligned word transfers can use the fast DCC memory-access mode */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);

	/* make sure memory-access mode is switched off again even if the
	 * transfer failed; the write status is ignored so that the
	 * original error, if any, is the one reported */
	if (dscr & DSCR_MA) {
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval != ERROR_OK)
		return retval;

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2234
2235 static int aarch64_read_phys_memory(struct target *target,
2236 target_addr_t address, uint32_t size,
2237 uint32_t count, uint8_t *buffer)
2238 {
2239 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2240
2241 if (count && buffer) {
2242 /* read memory through APB-AP */
2243 retval = aarch64_mmu_modify(target, 0);
2244 if (retval != ERROR_OK)
2245 return retval;
2246 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2247 }
2248 return retval;
2249 }
2250
2251 static int aarch64_read_memory(struct target *target, target_addr_t address,
2252 uint32_t size, uint32_t count, uint8_t *buffer)
2253 {
2254 int mmu_enabled = 0;
2255 int retval;
2256
2257 /* determine if MMU was enabled on target stop */
2258 retval = aarch64_mmu(target, &mmu_enabled);
2259 if (retval != ERROR_OK)
2260 return retval;
2261
2262 if (mmu_enabled) {
2263 /* enable MMU as we could have disabled it for phys access */
2264 retval = aarch64_mmu_modify(target, 1);
2265 if (retval != ERROR_OK)
2266 return retval;
2267 }
2268 return aarch64_read_cpu_memory(target, address, size, count, buffer);
2269 }
2270
2271 static int aarch64_write_phys_memory(struct target *target,
2272 target_addr_t address, uint32_t size,
2273 uint32_t count, const uint8_t *buffer)
2274 {
2275 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2276
2277 if (count && buffer) {
2278 /* write memory through APB-AP */
2279 retval = aarch64_mmu_modify(target, 0);
2280 if (retval != ERROR_OK)
2281 return retval;
2282 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2283 }
2284
2285 return retval;
2286 }
2287
2288 static int aarch64_write_memory(struct target *target, target_addr_t address,
2289 uint32_t size, uint32_t count, const uint8_t *buffer)
2290 {
2291 int mmu_enabled = 0;
2292 int retval;
2293
2294 /* determine if MMU was enabled on target stop */
2295 retval = aarch64_mmu(target, &mmu_enabled);
2296 if (retval != ERROR_OK)
2297 return retval;
2298
2299 if (mmu_enabled) {
2300 /* enable MMU as we could have disabled it for phys access */
2301 retval = aarch64_mmu_modify(target, 1);
2302 if (retval != ERROR_OK)
2303 return retval;
2304 }
2305 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2306 }
2307
2308 static int aarch64_handle_target_request(void *priv)
2309 {
2310 struct target *target = priv;
2311 struct armv8_common *armv8 = target_to_armv8(target);
2312 int retval;
2313
2314 if (!target_was_examined(target))
2315 return ERROR_OK;
2316 if (!target->dbg_msg_enabled)
2317 return ERROR_OK;
2318
2319 if (target->state == TARGET_RUNNING) {
2320 uint32_t request;
2321 uint32_t dscr;
2322 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2323 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2324
2325 /* check if we have data */
2326 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2327 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2328 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2329 if (retval == ERROR_OK) {
2330 target_request(target, request);
2331 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2332 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2333 }
2334 }
2335 }
2336
2337 return ERROR_OK;
2338 }
2339
/*
 * First-time examination of the target.
 *
 * Locates the APB-AP (unless an AP number was configured via -ap-num),
 * finds the core's debug base address in the CoreSight ROM table
 * (unless -dbgbase was given), clears the OS lock, reads the CPU ID
 * and the 64-bit memory-model and debug feature ID registers, binds
 * the configured CTI, sets up the debug module (DPM) and allocates the
 * breakpoint bookkeeping.
 *
 * Returns ERROR_OK and marks the target examined on success; fails if
 * no private config or no CTI was configured, or on any DAP error.
 */
static int aarch64_examine_first(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct adiv5_dap *swjdp = armv8->arm.dap;
	struct aarch64_private_config *pc = target->private_config;
	int i;
	int retval = ERROR_OK;
	uint64_t debug, ttypr;
	uint32_t cpuid;
	uint32_t tmp0, tmp1, tmp2, tmp3;
	debug = ttypr = cpuid = 0;

	/* a private config is created by aarch64_jim_configure(); without
	 * it there is no DAP/CTI information to work with */
	if (pc == NULL)
		return ERROR_FAIL;

	if (pc->adiv5_config.ap_num == DP_APSEL_INVALID) {
		/* Search for the APB-AB */
		retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not find APB-AP for debug access");
			return retval;
		}
	} else {
		/* an explicit AP number was configured - use it as-is */
		armv8->debug_ap = dap_ap(swjdp, pc->adiv5_config.ap_num);
	}

	retval = mem_ap_init(armv8->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not initialize the APB-AP");
		return retval;
	}

	armv8->debug_ap->memaccess_tck = 10;

	if (!target->dbgbase_set) {
		uint32_t dbgbase;
		/* Get ROM Table base */
		uint32_t apid;
		int32_t coreidx = target->coreid;
		retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
		if (retval != ERROR_OK)
			return retval;
		/* Lookup 0x15 -- Processor DAP */
		retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
				&armv8->debug_base, &coreidx);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
				" apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
	} else
		armv8->debug_base = target->dbgbase;

	/* clear the OS lock so debug register access is not blocked */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	retval = mem_ap_read_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "CPUID");
		return retval;
	}

	/* read the two 32-bit halves of the memory-model feature register */
	retval = mem_ap_read_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
	retval += mem_ap_read_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "Memory Model Type");
		return retval;
	}
	/* read the two 32-bit halves of the debug feature register */
	retval = mem_ap_read_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
	retval += mem_ap_read_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
		return retval;
	}

	retval = dap_run(armv8->debug_ap->dap);
	if (retval != ERROR_OK) {
		LOG_ERROR("%s: examination failed\n", target_name(target));
		return retval;
	}

	/* combine the halves into the 64-bit ID register values */
	ttypr |= tmp1;
	ttypr = (ttypr << 32) | tmp0;
	debug |= tmp3;
	debug = (debug << 32) | tmp2;

	LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
	LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
	LOG_DEBUG("debug = 0x%08" PRIx64, debug);

	/* a CTI is mandatory for halt/restart of ARMv8 cores */
	if (pc->cti == NULL)
		return ERROR_FAIL;

	armv8->cti = pc->cti;

	retval = aarch64_dpm_setup(aarch64, debug);
	if (retval != ERROR_OK)
		return retval;

	/* Setup Breakpoint Register Pairs */
	aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
	aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
	aarch64->brp_num_available = aarch64->brp_num;
	/* NOTE(review): calloc result is not checked here */
	aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
	for (i = 0; i < aarch64->brp_num; i++) {
		aarch64->brp_list[i].used = 0;
		/* the last brp_num_context pairs support context matching */
		if (i < (aarch64->brp_num-aarch64->brp_num_context))
			aarch64->brp_list[i].type = BRP_NORMAL;
		else
			aarch64->brp_list[i].type = BRP_CONTEXT;
		aarch64->brp_list[i].value = 0;
		aarch64->brp_list[i].control = 0;
		aarch64->brp_list[i].BRPn = i;
	}

	LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);

	target->state = TARGET_UNKNOWN;
	target->debug_reason = DBG_REASON_NOTHALTED;
	aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
	target_set_examined(target);
	return ERROR_OK;
}
2472
2473 static int aarch64_examine(struct target *target)
2474 {
2475 int retval = ERROR_OK;
2476
2477 /* don't re-probe hardware after each reset */
2478 if (!target_was_examined(target))
2479 retval = aarch64_examine_first(target);
2480
2481 /* Configure core debug access */
2482 if (retval == ERROR_OK)
2483 retval = aarch64_init_debug_access(target);
2484
2485 return retval;
2486 }
2487
2488 /*
2489 * Cortex-A8 target creation and initialization
2490 */
2491
2492 static int aarch64_init_target(struct command_context *cmd_ctx,
2493 struct target *target)
2494 {
2495 /* examine_first() does a bunch of this */
2496 arm_semihosting_init(target);
2497 return ERROR_OK;
2498 }
2499
2500 static int aarch64_init_arch_info(struct target *target,
2501 struct aarch64_common *aarch64, struct adiv5_dap *dap)
2502 {
2503 struct armv8_common *armv8 = &aarch64->armv8_common;
2504
2505 /* Setup struct aarch64_common */
2506 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2507 armv8->arm.dap = dap;
2508
2509 /* register arch-specific functions */
2510 armv8->examine_debug_reason = NULL;
2511 armv8->post_debug_entry = aarch64_post_debug_entry;
2512 armv8->pre_restore_context = NULL;
2513 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2514
2515 armv8_init_arch_info(target, armv8);
2516 target_register_timer_callback(aarch64_handle_target_request, 1,
2517 TARGET_TIMER_TYPE_PERIODIC, target);
2518
2519 return ERROR_OK;
2520 }
2521
2522 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2523 {
2524 struct aarch64_private_config *pc = target->private_config;
2525 struct aarch64_common *aarch64;
2526
2527 if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2528 return ERROR_FAIL;
2529
2530 aarch64 = calloc(1, sizeof(struct aarch64_common));
2531 if (aarch64 == NULL) {
2532 LOG_ERROR("Out of memory");
2533 return ERROR_FAIL;
2534 }
2535
2536 return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2537 }
2538
2539 static void aarch64_deinit_target(struct target *target)
2540 {
2541 struct aarch64_common *aarch64 = target_to_aarch64(target);
2542 struct armv8_common *armv8 = &aarch64->armv8_common;
2543 struct arm_dpm *dpm = &armv8->dpm;
2544
2545 armv8_free_reg_cache(target);
2546 free(aarch64->brp_list);
2547 free(dpm->dbp);
2548 free(dpm->dwp);
2549 free(target->private_config);
2550 free(aarch64);
2551 }
2552
2553 static int aarch64_mmu(struct target *target, int *enabled)
2554 {
2555 if (target->state != TARGET_HALTED) {
2556 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2557 return ERROR_TARGET_INVALID;
2558 }
2559
2560 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2561 return ERROR_OK;
2562 }
2563
2564 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2565 target_addr_t *phys)
2566 {
2567 return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2568 }
2569
/*
 * private target configuration items
 */
enum aarch64_cfg_param {
	CFG_CTI,	/* "-cti": cross-trigger interface instance for this core */
};

/* option names recognized by aarch64_jim_configure() */
static const Jim_Nvp nvp_config_opts[] = {
	{ .name = "-cti", .value = CFG_CTI },
	{ .name = NULL, .value = -1 }
};
2581
/*
 * Parse 'configure'/'cget' options private to the aarch64 target.
 *
 * First delegates the common ADIv5 options to adiv5_jim_configure(),
 * then handles the aarch64-specific "-cti" option.  The private config
 * is allocated lazily on first use.
 *
 * Returns JIM_OK when an option was consumed, JIM_CONTINUE when the
 * topmost option is unknown (so other parsers may try it), and
 * JIM_ERR on malformed input.
 */
static int aarch64_jim_configure(struct target *target, Jim_GetOptInfo *goi)
{
	struct aarch64_private_config *pc;
	Jim_Nvp *n;
	int e;

	pc = (struct aarch64_private_config *)target->private_config;
	if (pc == NULL) {
		/* first call: allocate the config with an invalid AP number */
		pc = calloc(1, sizeof(struct aarch64_private_config));
		pc->adiv5_config.ap_num = DP_APSEL_INVALID;
		target->private_config = pc;
	}

	/*
	 * Call adiv5_jim_configure() to parse the common DAP options
	 * It will return JIM_CONTINUE if it didn't find any known
	 * options, JIM_OK if it correctly parsed the topmost option
	 * and JIM_ERR if an error occurred during parameter evaluation.
	 * For JIM_CONTINUE, we check our own params.
	 *
	 * adiv5_jim_configure() assumes 'private_config' to point to
	 * 'struct adiv5_private_config'. Override 'private_config'!
	 */
	target->private_config = &pc->adiv5_config;
	e = adiv5_jim_configure(target, goi);
	target->private_config = pc;
	if (e != JIM_CONTINUE)
		return e;

	/* parse config or cget options ... */
	if (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);

		/* check first if topmost item is for us */
		e = Jim_Nvp_name2value_obj(goi->interp, nvp_config_opts,
				goi->argv[0], &n);
		if (e != JIM_OK)
			return JIM_CONTINUE;

		/* consume the option name */
		e = Jim_GetOpt_Obj(goi, NULL);
		if (e != JIM_OK)
			return e;

		switch (n->value) {
		case CFG_CTI: {
			if (goi->isconfigure) {
				/* 'configure -cti <name>': bind the named CTI */
				Jim_Obj *o_cti;
				struct arm_cti *cti;
				e = Jim_GetOpt_Obj(goi, &o_cti);
				if (e != JIM_OK)
					return e;
				cti = cti_instance_by_jim_obj(goi->interp, o_cti);
				if (cti == NULL) {
					Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
					return JIM_ERR;
				}
				pc->cti = cti;
			} else {
				/* 'cget -cti': report the bound CTI's name */
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}

				if (pc == NULL || pc->cti == NULL) {
					Jim_SetResultString(goi->interp, "CTI not configured", -1);
					return JIM_ERR;
				}
				Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
			}
			break;
		}

		default:
			return JIM_CONTINUE;
		}
	}

	return JIM_OK;
}
2663
2664 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2665 {
2666 struct target *target = get_current_target(CMD_CTX);
2667 struct armv8_common *armv8 = target_to_armv8(target);
2668
2669 return armv8_handle_cache_info_command(CMD,
2670 &armv8->armv8_mmu.armv8_cache);
2671 }
2672
2673 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2674 {
2675 struct target *target = get_current_target(CMD_CTX);
2676 if (!target_was_examined(target)) {
2677 LOG_ERROR("target not examined yet");
2678 return ERROR_FAIL;
2679 }
2680
2681 return aarch64_init_debug_access(target);
2682 }
2683
2684 COMMAND_HANDLER(aarch64_handle_disassemble_command)
2685 {
2686 struct target *target = get_current_target(CMD_CTX);
2687
2688 if (target == NULL) {
2689 LOG_ERROR("No target selected");
2690 return ERROR_FAIL;
2691 }
2692
2693 struct aarch64_common *aarch64 = target_to_aarch64(target);
2694
2695 if (aarch64->common_magic != AARCH64_COMMON_MAGIC) {
2696 command_print(CMD, "current target isn't an AArch64");
2697 return ERROR_FAIL;
2698 }
2699
2700 int count = 1;
2701 target_addr_t address;
2702
2703 switch (CMD_ARGC) {
2704 case 2:
2705 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
2706 /* FALL THROUGH */
2707 case 1:
2708 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
2709 break;
2710 default:
2711 return ERROR_COMMAND_SYNTAX_ERROR;
2712 }
2713
2714 return a64_disassemble(CMD, target, address, count);
2715 }
2716
2717 COMMAND_HANDLER(aarch64_mask_interrupts_command)
2718 {
2719 struct target *target = get_current_target(CMD_CTX);
2720 struct aarch64_common *aarch64 = target_to_aarch64(target);
2721
2722 static const Jim_Nvp nvp_maskisr_modes[] = {
2723 { .name = "off", .value = AARCH64_ISRMASK_OFF },
2724 { .name = "on", .value = AARCH64_ISRMASK_ON },
2725 { .name = NULL, .value = -1 },
2726 };
2727 const Jim_Nvp *n;
2728
2729 if (CMD_ARGC > 0) {
2730 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2731 if (n->name == NULL) {
2732 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2733 return ERROR_COMMAND_SYNTAX_ERROR;
2734 }
2735
2736 aarch64->isrmasking_mode = n->value;
2737 }
2738
2739 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
2740 command_print(CMD, "aarch64 interrupt mask %s", n->name);
2741
2742 return ERROR_OK;
2743 }
2744
2745 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
2746 {
2747 struct command_context *context;
2748 struct target *target;
2749 struct arm *arm;
2750 int retval;
2751 bool is_mcr = false;
2752 int arg_cnt = 0;
2753
2754 if (Jim_CompareStringImmediate(interp, argv[0], "mcr")) {
2755 is_mcr = true;
2756 arg_cnt = 7;
2757 } else {
2758 arg_cnt = 6;
2759 }
2760
2761 context = current_command_context(interp);
2762 assert(context != NULL);
2763
2764 target = get_current_target(context);
2765 if (target == NULL) {
2766 LOG_ERROR("%s: no current target", __func__);
2767 return JIM_ERR;
2768 }
2769 if (!target_was_examined(target)) {
2770 LOG_ERROR("%s: not yet examined", target_name(target));
2771 return JIM_ERR;
2772 }
2773
2774 arm = target_to_arm(target);
2775 if (!is_arm(arm)) {
2776 LOG_ERROR("%s: not an ARM", target_name(target));
2777 return JIM_ERR;
2778 }
2779
2780 if (target->state != TARGET_HALTED)
2781 return ERROR_TARGET_NOT_HALTED;
2782
2783 if (arm->core_state == ARM_STATE_AARCH64) {
2784 LOG_ERROR("%s: not 32-bit arm target", target_name(target));
2785 return JIM_ERR;
2786 }
2787
2788 if (argc != arg_cnt) {
2789 LOG_ERROR("%s: wrong number of arguments", __func__);
2790 return JIM_ERR;
2791 }
2792
2793 int cpnum;
2794 uint32_t op1;
2795 uint32_t op2;
2796 uint32_t CRn;
2797 uint32_t CRm;
2798 uint32_t value;
2799 long l;
2800
2801 /* NOTE: parameter sequence matches ARM instruction set usage:
2802 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
2803 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
2804 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
2805 */
2806 retval = Jim_GetLong(interp, argv[1], &l);
2807 if (retval != JIM_OK)
2808 return retval;
2809 if (l & ~0xf) {
2810 LOG_ERROR("%s: %s %d out of range", __func__,
2811 "coprocessor", (int) l);
2812 return JIM_ERR;
2813 }
2814 cpnum = l;
2815
2816 retval = Jim_GetLong(interp, argv[2], &l);
2817 if (retval != JIM_OK)
2818 return retval;
2819 if (l & ~0x7) {
2820 LOG_ERROR("%s: %s %d out of range", __func__,
2821 "op1", (int) l);
2822 return JIM_ERR;
2823 }
2824 op1 = l;
2825
2826 retval = Jim_GetLong(interp, argv[3], &l);
2827 if (retval != JIM_OK)
2828 return retval;
2829 if (l & ~0xf) {
2830 LOG_ERROR("%s: %s %d out of range", __func__,
2831 "CRn", (int) l);
2832 return JIM_ERR;
2833 }
2834 CRn = l;
2835
2836 retval = Jim_GetLong(interp, argv[4], &l);
2837 if (retval != JIM_OK)
2838 return retval;
2839 if (l & ~0xf) {
2840 LOG_ERROR("%s: %s %d out of range", __func__,
2841 "CRm", (int) l);
2842 return JIM_ERR;
2843 }
2844 CRm = l;
2845
2846 retval = Jim_GetLong(interp, argv[5], &l);
2847 if (retval != JIM_OK)
2848 return retval;
2849 if (l & ~0x7) {
2850 LOG_ERROR("%s: %s %d out of range", __func__,
2851 "op2", (int) l);
2852 return JIM_ERR;
2853 }
2854 op2 = l;
2855
2856 value = 0;
2857
2858 if (is_mcr == true) {
2859 retval = Jim_GetLong(interp, argv[6], &l);
2860 if (retval != JIM_OK)
2861 return retval;
2862 value = l;
2863
2864 /* NOTE: parameters reordered! */
2865 /* ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2) */
2866 retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
2867 if (retval != ERROR_OK)
2868 return JIM_ERR;
2869 } else {
2870 /* NOTE: parameters reordered! */
2871 /* ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2) */
2872 retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
2873 if (retval != ERROR_OK)
2874 return JIM_ERR;
2875
2876 Jim_SetResult(interp, Jim_NewIntObj(interp, value));
2877 }
2878
2879 return JIM_OK;
2880 }
2881
/* subcommands registered under the top-level "aarch64" command group */
static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "disassemble",
		.handler = aarch64_handle_disassemble_command,
		.mode = COMMAND_EXEC,
		.help = "Disassemble instructions",
		.usage = "address [count]",
	},
	{
		.name = "maskisr",
		.handler = aarch64_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask aarch64 interrupts during single-step",
		.usage = "['on'|'off']",
	},
	{
		.name = "mcr",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "write coprocessor register",
		.usage = "cpnum op1 CRn CRm op2 value",
	},
	{
		.name = "mrc",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "read coprocessor register",
		.usage = "cpnum op1 CRn CRm op2",
	},
	{
		/* pull in the generic SMP commands */
		.chain = smp_command_handlers,
	},

	COMMAND_REGISTRATION_DONE
};
2932
2933 extern const struct command_registration semihosting_common_handlers[];
2934
/* top-level command groups exported by the aarch64 target */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.name = "arm",
		.mode = COMMAND_ANY,
		.help = "ARM Command Group",
		.usage = "",
		/* semihosting commands live under "arm ..." */
		.chain = semihosting_common_handlers
	},
	{
		/* generic ARMv8 commands */
		.chain = armv8_command_handlers,
	},
	{
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "Aarch64 command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2955
/* target_type method table for AArch64 (ARMv8-A) cores */
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = armv8_get_gdb_arch,
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	/* watchpoints are not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.target_jim_configure = aarch64_jim_configure,
	.init_target = aarch64_init_target,
	.deinit_target = aarch64_deinit_target,
	.examine = aarch64_examine,

	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will link the new login method to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)