aarch64: don't try resuming if target is not halted
[openocd.git] / src / target / aarch64.c
/***************************************************************************
 *   Copyright (C) 2015 by David Ung                                       *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *                                                                         *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "breakpoints.h"
#include "aarch64.h"
#include "register.h"
#include "target_request.h"
#include "target_type.h"
#include "armv8_opcodes.h"
#include "armv8_cache.h"
#include <helper/time_support.h>

static int aarch64_poll(struct target *target);
static int aarch64_debug_entry(struct target *target);
static int aarch64_restore_context(struct target *target, bool bpwp);
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_mmu(struct target *target, int *enabled);
static int aarch64_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int aarch64_read_apb_ap_memory(struct target *target,
	target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer);

static int aarch64_restore_system_control_reg(struct target *target)
{
	int retval = ERROR_OK;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

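		/* SCTLR_ELx is written via MSR; op1 in the encoding selects the
		 * exception level: 0 -> SCTLR_EL1, 4 -> SCTLR_EL2, 6 -> SCTLR_EL3 */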
		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			retval = armv8->arm.msr(target, 3, /*op 0*/
					0, 1,	/* op1, op2 */
					0, 0,	/* CRn, CRm */
					aarch64->system_control_reg);
			if (retval != ERROR_OK)
				return retval;
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			retval = armv8->arm.msr(target, 3, /*op 0*/
					4, 1,	/* op1, op2 */
					0, 0,	/* CRn, CRm */
					aarch64->system_control_reg);
			if (retval != ERROR_OK)
				return retval;
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			retval = armv8->arm.msr(target, 3, /*op 0*/
					6, 1,	/* op1, op2 */
					0, 0,	/* CRn, CRm */
					aarch64->system_control_reg);
			if (retval != ERROR_OK)
				return retval;
			break;
		default:
			retval = armv8->arm.mcr(target, 15, 0, 0, 1, 0, aarch64->system_control_reg);
			if (retval != ERROR_OK)
				return retval;
			break;
		}
	}
	return retval;
}

/* check address before aarch64_apb read write access with mmu on
 * to avoid predictable apb data aborts */
static int aarch64_check_address(struct target *target, target_addr_t address)
{
	/* TODO */
	return ERROR_OK;
}
/* modify system_control_reg in order to enable or disable mmu for:
 * - virt2phys address conversion
 * - read or write memory at physical or virtual addresses */
static int aarch64_mmu_modify(struct target *target, int enable)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval = ERROR_OK;

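	/* SCTLR_ELx bits used below: M (bit 0) = MMU enable, C (bit 2) = data
	 * cache enable, I (bit 12) = instruction cache enable */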
	if (enable) {
		/* if the mmu was disabled at target stop, it cannot be enabled here */
		if (!(aarch64->system_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
			return ERROR_FAIL;
		}
		if (!(aarch64->system_control_reg_curr & 0x1U)) {
			aarch64->system_control_reg_curr |= 0x1U;
			switch (armv8->arm.core_mode) {
			case ARMV8_64_EL0T:
			case ARMV8_64_EL1T:
			case ARMV8_64_EL1H:
				retval = armv8->arm.msr(target, 3, /*op 0*/
						0, 0,	/* op1, op2 */
						1, 0,	/* CRn, CRm */
						aarch64->system_control_reg_curr);
				if (retval != ERROR_OK)
					return retval;
				break;
			case ARMV8_64_EL2T:
			case ARMV8_64_EL2H:
				retval = armv8->arm.msr(target, 3, /*op 0*/
						4, 0,	/* op1, op2 */
						1, 0,	/* CRn, CRm */
						aarch64->system_control_reg_curr);
				if (retval != ERROR_OK)
					return retval;
				break;
			case ARMV8_64_EL3H:
			case ARMV8_64_EL3T:
				retval = armv8->arm.msr(target, 3, /*op 0*/
						6, 0,	/* op1, op2 */
						1, 0,	/* CRn, CRm */
						aarch64->system_control_reg_curr);
				if (retval != ERROR_OK)
					return retval;
				break;
			default:
				LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_state);
			}
		}
	} else {
		if (aarch64->system_control_reg_curr & 0x4U) {
			/* data cache is active */
			aarch64->system_control_reg_curr &= ~0x4U;
			/* flush the data cache before disabling it */
			if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
				armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
		}
		if ((aarch64->system_control_reg_curr & 0x1U)) {
			aarch64->system_control_reg_curr &= ~0x1U;
			switch (armv8->arm.core_mode) {
			case ARMV8_64_EL0T:
			case ARMV8_64_EL1T:
			case ARMV8_64_EL1H:
				retval = armv8->arm.msr(target, 3, /*op 0*/
						0, 0,	/* op1, op2 */
						1, 0,	/* CRn, CRm */
						aarch64->system_control_reg_curr);
				if (retval != ERROR_OK)
					return retval;
				break;
			case ARMV8_64_EL2T:
			case ARMV8_64_EL2H:
				retval = armv8->arm.msr(target, 3, /*op 0*/
						4, 0,	/* op1, op2 */
						1, 0,	/* CRn, CRm */
						aarch64->system_control_reg_curr);
				if (retval != ERROR_OK)
					return retval;
				break;
			case ARMV8_64_EL3H:
			case ARMV8_64_EL3T:
				retval = armv8->arm.msr(target, 3, /*op 0*/
						6, 0,	/* op1, op2 */
						1, 0,	/* CRn, CRm */
						aarch64->system_control_reg_curr);
				if (retval != ERROR_OK)
					return retval;
				break;
			default:
				LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_state);
				break;
			}
		}
	}
	return retval;
}

/*
 * Basic debug access, very low level assumes state is saved
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_CTR, 1);
	/* By default, gate all channel triggers to and from the CTM */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, 0);
	/* output halt requests to PE on channel 0 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return aarch64_poll(target);
}

/* Write to memory mapped registers directly with no cache or mmu handling */
static int aarch64_dap_write_memap_register_u32(struct target *target,
	uint32_t address,
	uint32_t value)
{
	int retval;
	struct armv8_common *armv8 = target_to_armv8(target);

	retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);

	return retval;
}

static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
{
	struct arm_dpm *dpm = &a8->armv8_common.dpm;
	int retval;

	dpm->arm = &a8->armv8_common.arm;
	dpm->didr = debug;

	retval = armv8_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = armv8_dpm_initialize(dpm);

	return retval;
}

static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	uint32_t dscr;

	/* Read DSCR */
	int retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* clear bitfield */
	dscr &= ~bit_mask;
	/* put new value */
	dscr |= value & bit_mask;

	/* write new DSCR */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	return retval;
}

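/* Return the halted core of the SMP group whose coreid matches;
 * fall back to the target passed in if no such core is found. */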
static struct target *get_aarch64(struct target *target, int32_t coreid)
{
	struct target_list *head;
	struct target *curr;

	head = target->head;
	while (head != (struct target_list *)NULL) {
		curr = head->target;
		if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
			return curr;
		head = head->next;
	}
	return target;
}

static int aarch64_halt(struct target *target);

static int aarch64_halt_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head = target->head;

	while (head != (struct target_list *)NULL) {
		struct target *curr = head->target;
		struct armv8_common *armv8 = target_to_armv8(curr);

		/* open the gate for channel 0 to let HALT requests pass to the CTM */
		if (curr->smp) {
			retval = mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->cti_base + CTI_GATE, CTI_CHNL(0));
			if (retval == ERROR_OK)
				retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
		}
		if (retval != ERROR_OK)
			break;

		head = head->next;
	}

	/* halt the target PE */
	if (retval == ERROR_OK)
		retval = aarch64_halt(target);

	return retval;
}

static int update_halt_gdb(struct target *target)
{
	int retval = 0;

	if (target->gdb_service && target->gdb_service->core[0] == -1) {
		target->gdb_service->target = target;
		target->gdb_service->core[0] = target->coreid;
		retval += aarch64_halt_smp(target);
	}
	return retval;
}

/*
 * AArch64 Run control
 */

static int aarch64_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	enum target_state prev_target_state = target->state;

	/* toggling to another core is done by gdb as follows:
	 *   maint packet J core_id
	 *   continue
	 * the next poll triggers a halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
			(target->gdb_service) &&
			(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_aarch64(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	aarch64->cpudbg_dscr = dscr;

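	/* run mode 0x3 means the core is halted in debug state */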
	if (DSCR_RUN_MODE(dscr) == 0x3) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target %s halted", target_name(target));
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
					|| (prev_target_state == TARGET_UNKNOWN)
					|| (prev_target_state == TARGET_RESET)) {
				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}

static int aarch64_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv8_common *armv8 = target_to_armv8(target);

	/* set DSCR.HDE to enable halting debug mode */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_APPPULSE, CTI_CHNL(0));
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;;) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCRV8_HALT_MASK) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}

static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups do not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* call it now, before restoring context, because it uses cpu
	 * register r0 for restoring the system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}

static int aarch64_internal_restart(struct target *target, bool slave_pe)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t dscr;
	/*
	 * Restart core and wait for it to be started. Clear ITRen and sticky
	 * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* make sure to acknowledge the halt event before resuming */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_INACK, CTI_TRIG(HALT));

	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs
	 */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	if (!slave_pe) {
		/* trigger an event on channel 1, generates a restart request to the PE */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_APPPULSE, CTI_CHNL(1));
		if (retval != ERROR_OK)
			return retval;

		long long then = timeval_ms();
		for (;;) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			if (retval != ERROR_OK)
				return retval;
			if ((dscr & DSCR_HDE) != 0)
				break;
			if (timeval_ms() > then + 1000) {
				LOG_ERROR("Timeout waiting for resume");
				return ERROR_FAIL;
			}
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);
	register_cache_invalidate(arm->core_cache->next);

	return ERROR_OK;
}

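/* Restore and restart all other cores of the SMP group as slave PEs;
 * the calling core itself is restarted separately by the caller. */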
static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
{
	int retval = 0;
	struct target_list *head;
	struct target *curr;
	uint64_t address;

	head = target->head;
	while (head != (struct target_list *)NULL) {
		curr = head->target;
		if ((curr != target) && (curr->state != TARGET_RUNNING)) {
			/* resume at current address, not in step mode */
			retval += aarch64_internal_restore(curr, 1, &address,
					handle_breakpoints, 0);
			retval += aarch64_internal_restart(curr, true);
		}
		head = head->next;
	}
	return retval;
}

static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	/* dummy resume for smp toggle in order to reduce gdb impact */
	if ((target->smp) && (target->gdb_service->core[1] != -1)) {
		/* simulate a start and halt of target */
		target->gdb_service->target = NULL;
		target->gdb_service->core[0] = target->gdb_service->core[1];
		/* fake resume: at the next poll we play the target core[1], see poll */
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		return 0;
	}

	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	aarch64_internal_restore(target, current, &addr, handle_breakpoints,
		debug_execution);
	if (target->smp) {
		target->gdb_service->core[0] = -1;
		retval = aarch64_restore_smp(target, handle_breakpoints);
		if (retval != ERROR_OK)
			return retval;
	}
	aarch64_internal_restart(target, false);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}

static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), aarch64->cpudbg_dscr);

	dpm->dscr = aarch64->cpudbg_dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);

	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);

	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, aarch64->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

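		/* combine WFAR1 (high word) and WFAR0 (low word) into the
		 * 64-bit watchpoint fault address */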
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR1,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}

static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;

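	/* read SCTLR for the current exception level; op1 of the MRS
	 * encoding selects EL1 (0), EL2 (4) or EL3 (6) */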
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		armv8_dpm_modeswitch(&armv8->dpm, ARMV8_64_EL1H);
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		retval = armv8->arm.mrs(target, 3, /*op 0*/
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				&aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		retval = armv8->arm.mrs(target, 3, /*op 0*/
				4, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				&aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		retval = armv8->arm.mrs(target, 3, /*op 0*/
				6, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				&aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;

	case ARM_MODE_SVC:
		retval = armv8->arm.mrc(target, 15, 0, 0, 1, 0, &aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;

	default:
		LOG_INFO("cannot read system control register in this mode");
		break;
	}

	armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
			(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
			(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	aarch64->curr_mode = armv8->arm.core_mode;
	return ERROR_OK;
}

static int aarch64_step(struct target *target, int current, target_addr_t address,
	int handle_breakpoints)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t edecr;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
	if (retval != ERROR_OK)
		return retval;

	/* make sure EDECR.SS is not set when restoring the register */
	edecr &= ~0x4;

	/* set EDECR.SS to enter hardware step mode */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
	if (retval != ERROR_OK)
		return retval;

	/* disable interrupts (EDSCR.INTdis, bits [23:22]) while stepping */
	retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
	if (retval != ERROR_OK)
		return retval;

	/* resume the target */
	retval = aarch64_resume(target, current, address, 0, 0);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	while (target->state != TARGET_HALTED) {
		retval = aarch64_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("timeout waiting for target halt");
			return ERROR_FAIL;
		}
	}

	/* restore EDECR */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, edecr);
	if (retval != ERROR_OK)
		return retval;

	/* restore interrupts */
	retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}

static int aarch64_restore_context(struct target *target, bool bpwp)
{
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG("%s", target_name(target));

	if (armv8->pre_restore_context)
		armv8->pre_restore_context(target);

	return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
}

/*
 * AArch64 Breakpoint and watchpoint functions
 */

/* Setup hardware Breakpoint Register Pair */
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		int64_t bpt_value;
		/* check the bound first to avoid reading past the end of brp_list */
		while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
			brp_i++;
		if (brp_i >= aarch64->brp_num) {
			LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
		breakpoint->set = brp_i + 1;
		if (breakpoint->length == 2)
			byte_addr_select = (3 << (breakpoint->address & 0x02));
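		/* DBGBCR layout: BT (matchmode) in bits [23:20], HMC in bit [13],
		 * BAS in bits [12:5], PMC in bits [2:1], E (enable) in bit [0] */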
		control = ((matchmode & 0x7) << 20)
			| (1 << 13)
			| (byte_addr_select << 5)
			| (3 << 1) | 1;
		brp_list[brp_i].used = 1;
		brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
		brp_list[brp_i].control = control;
		bpt_value = brp_list[brp_i].value;

		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
				(uint32_t)(bpt_value & 0xFFFFFFFF));
		if (retval != ERROR_OK)
			return retval;
		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
				(uint32_t)(bpt_value >> 32));
		if (retval != ERROR_OK)
			return retval;

		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
				brp_list[brp_i].control);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
			brp_list[brp_i].control,
			brp_list[brp_i].value);

	} else if (breakpoint->type == BKPT_SOFT) {
		uint8_t code[4];

		buf_set_u32(code, 0, 32, ARMV8_HLT(0x11));
		retval = target_read_memory(target,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length, 1,
				breakpoint->orig_instr);
		if (retval != ERROR_OK)
			return retval;

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		retval = target_write_memory(target,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length, 1, code);
		if (retval != ERROR_OK)
			return retval;

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		breakpoint->set = 0x11;	/* Any nice value but 0 */
	}

	/* Ensure that halting debug mode is enabled */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Failed to set DSCR.HDE");
		return retval;
	}

	return ERROR_OK;
}

static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval = ERROR_FAIL;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs, testing the bound first */
	while ((brp_i < aarch64->brp_num) &&
			(brp_list[brp_i].used || (brp_list[brp_i].type != BRP_CONTEXT)))
		brp_i++;

	if (brp_i >= aarch64->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint->set = brp_i + 1;
	control = ((matchmode & 0x7) << 20)
		| (1 << 13)
		| (byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_i].used = 1;
	brp_list[brp_i].value = (breakpoint->asid);
	brp_list[brp_i].control = control;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
			brp_list[brp_i].value);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
			brp_list[brp_i].control);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
		brp_list[brp_i].control,
		brp_list[brp_i].value);
	return ERROR_OK;
}

static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval = ERROR_FAIL;
	int brp_1 = 0;	/* holds the contextID pair */
	int brp_2 = 0;	/* holds the IVA pair */
	uint32_t control_CTX, control_IVA;
	uint8_t CTX_byte_addr_select = 0x0F;
	uint8_t IVA_byte_addr_select = 0x0F;
	uint8_t CTX_machmode = 0x03;
	uint8_t IVA_machmode = 0x01;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs, testing the bound first */
	while ((brp_1 < aarch64->brp_num) &&
			(brp_list[brp_1].used || (brp_list[brp_1].type != BRP_CONTEXT)))
		brp_1++;

	LOG_DEBUG("brp(CTX) found num: %d", brp_1);
	if (brp_1 >= aarch64->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	while ((brp_2 < aarch64->brp_num) &&
			(brp_list[brp_2].used || (brp_list[brp_2].type != BRP_NORMAL)))
		brp_2++;

	LOG_DEBUG("brp(IVA) found num: %d", brp_2);
	if (brp_2 >= aarch64->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

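	/* the two BRPs are linked by writing the other pair's number into
	 * the LBN field (bits [19:16]) of each control register */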
	breakpoint->set = brp_1 + 1;
	breakpoint->linked_BRP = brp_2;
	control_CTX = ((CTX_machmode & 0x7) << 20)
		| (brp_2 << 16)
		| (0 << 14)
		| (CTX_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_1].used = 1;
	brp_list[brp_1].value = (breakpoint->asid);
	brp_list[brp_1].control = control_CTX;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
			brp_list[brp_1].value);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
			brp_list[brp_1].control);
	if (retval != ERROR_OK)
		return retval;

	control_IVA = ((IVA_machmode & 0x7) << 20)
		| (brp_1 << 16)
		| (1 << 13)
		| (IVA_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_2].used = 1;
	brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
	brp_list[brp_2].control = control_IVA;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].value & 0xFFFFFFFF);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].value >> 32);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].control);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}

static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}

static int aarch64_add_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_breakpoint(target, breakpoint, 0x00);	/* Exact match */
}

static int aarch64_add_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_context_breakpoint(target, breakpoint, 0x02);	/* asid match */
}

static int aarch64_add_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_hybrid_breakpoint(target, breakpoint);	/* ??? */
}

static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

#if 0
/* It is perfectly possible to remove breakpoints while the target is running */
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}
#endif

	if (breakpoint->set) {
		aarch64_unset_breakpoint(target, breakpoint);
		if (breakpoint->type == BKPT_HARD)
			aarch64->brp_num_available++;
	}

	return ERROR_OK;
}

/*
 * AArch64 Reset functions
 */

static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		jtag_add_reset(0, 1);
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target))
		register_cache_invalidate(armv8->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}

static int aarch64_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	jtag_add_reset(0, 0);

	if (!target_was_examined(target))
		return ERROR_OK;

	retval = aarch64_poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
				target_name(target));
			retval = target_halt(target);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return ERROR_OK;
}

static int aarch64_write_apb_ap_memory(struct target *target,
	uint64_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* write memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int total_bytes = count * size;
	int total_u32;
	int start_byte = address & 0x3;
	int end_byte = (address + total_bytes) & 0x3;
	struct reg *reg;
	uint32_t dscr;
	uint8_t *tmp_buff = NULL;

	LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count %" PRIu32,
		address, size, count);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
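	/* total_u32 is the number of 32-bit words needed to cover the
	 * (possibly unaligned) byte transfer */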

	/* Mark registers x0 and x1 as dirty, as they will be used
	 * for transferring the data.
	 * They will be restored automatically when exiting
	 * debug mode
	 */
	reg = armv8_reg_current(arm, 1);
	reg->dirty = true;

	reg = armv8_reg_current(arm, 0);
	reg->dirty = true;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* The algorithm only copies 32 bit words, so the buffer
	 * should be expanded to include the words at either end.
	 * The first and last words will be read first to avoid
	 * corruption if needed.
	 */
	tmp_buff = malloc(total_u32 * 4);

	if ((start_byte != 0) && (total_u32 > 1)) {
		/* First bytes not aligned - read the 32 bit word to avoid corrupting
		 * the other bytes in the word.
		 */
		retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
		if (retval != ERROR_OK)
			goto error_free_buff_w;
	}

	/* If end of write is not aligned, or the write is less than 4 bytes */
	if ((end_byte != 0) ||
		((total_u32 == 1) && (total_bytes != 4))) {

		/* Read the last word to avoid corruption during 32 bit write */
		int mem_offset = (total_u32-1) * 4;
		retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
		if (retval != ERROR_OK)
			goto error_free_buff_w;
	}

	/* Copy the write buffer over the top of the temporary buffer */
	memcpy(&tmp_buff[start_byte], buffer, total_bytes);

	/* We now have a 32 bit aligned buffer that can be written */

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_w;

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to X0 using instruction mrs x0, DBGDTR_EL0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRX */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
	}
	/* Step 1.d - Change DCC to memory mode */
	dscr = dscr | DSCR_MA;
	retval += mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_unset_dtr_w;

	/* Step 2.a - Do the write */
	retval = mem_ap_write_buf_noincr(armv8->debug_ap,
			tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
	if (retval != ERROR_OK)
		goto error_unset_dtr_w;

	/* Step 3.a - Switch DTR mode back to Normal mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_unset_dtr_w;

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_w;

	dpm->dscr = dscr;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm);
		goto error_free_buff_w;
	}

	/* Done */
	free(tmp_buff);
	return ERROR_OK;

error_unset_dtr_w:
	/* Unset DTR mode */
	mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_MA);
	mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
error_free_buff_w:
	LOG_ERROR("error");
	free(tmp_buff);
	return ERROR_FAIL;
}

static int aarch64_read_apb_ap_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int total_bytes = count * size;
	int total_u32;
	int start_byte = address & 0x3;
	int end_byte = (address + total_bytes) & 0x3;
	struct reg *reg;
	uint32_t dscr;
	uint8_t *tmp_buff = NULL;
	uint8_t *u8buf_ptr;
	uint32_t value;

	LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count %" PRIu32,
		address, size, count);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
	/* Mark registers x0 and x1 as dirty, as they will be used
	 * for transferring the data.
	 * They will be restored automatically when exiting
	 * debug mode
	 */
	reg = armv8_reg_current(arm, 1);
	reg->dirty = true;

	reg = armv8_reg_current(arm, 0);
	reg->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval += mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to X0 using instruction mrs x0, DBGDTR_EL0 */
		retval += dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval += dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	}
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Optimize the read as much as we can, either way we read in a single pass */
	if ((start_byte) || (end_byte)) {
		/* The algorithm only copies 32 bit words, so the buffer
		 * should be expanded to include the words at either end.
		 * The first and last words will be read into a temp buffer
		 * to avoid corruption
		 */
		tmp_buff = malloc(total_u32 * 4);
		if (!tmp_buff)
			goto error_unset_dtr_r;

		/* use the tmp buffer to read the entire data */
		u8buf_ptr = tmp_buff;
	} else
		/* address and read length are aligned so read directly into the passed buffer */
		u8buf_ptr = buffer;

	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
	 * increments X0 by 4. */
	retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
			armv8->debug_base + CPUV8_DBG_DTRTX);
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Step 3.a - set DTR access mode back to Normal mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm);
		goto error_free_buff_r;
	}

	/* check if we need to copy aligned data by applying any shift necessary */
	if (tmp_buff) {
		memcpy(buffer, tmp_buff + start_byte, total_bytes);
		free(tmp_buff);
	}

	/* Done */
	return ERROR_OK;

error_unset_dtr_r:
	/* Unset DTR mode */
	mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_MA);
	mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
error_free_buff_r:
	LOG_ERROR("error");
	free(tmp_buff);
	return ERROR_FAIL;
}

static int aarch64_read_phys_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	int retval = ERROR_COMMAND_SYNTAX_ERROR;

	LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
		address, size, count);

	if (count && buffer) {
		/* read memory through APB-AP */
		retval = aarch64_mmu_modify(target, 0);
		if (retval != ERROR_OK)
			return retval;
		retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
	}
	return retval;
}

static int aarch64_read_memory(struct target *target, target_addr_t address,
	uint32_t size, uint32_t count, uint8_t *buffer)
{
	int mmu_enabled = 0;
	int retval;

	/* aarch64 handles unaligned memory access */
	LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
		size, count);

	/* determine if MMU was enabled on target stop */
	retval = aarch64_mmu(target, &mmu_enabled);
	if (retval != ERROR_OK)
		return retval;

	if (mmu_enabled) {
		retval = aarch64_check_address(target, address);
		if (retval != ERROR_OK)
			return retval;
		/* enable MMU as we could have disabled it for phys access */
		retval = aarch64_mmu_modify(target, 1);
		if (retval != ERROR_OK)
			return retval;
	}
	return aarch64_read_apb_ap_memory(target, address, size, count, buffer);
}

static int aarch64_write_phys_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	int retval = ERROR_COMMAND_SYNTAX_ERROR;

	LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
		size, count);

	if (count && buffer) {
		/* write memory through APB-AP */
		retval = aarch64_mmu_modify(target, 0);
		if (retval != ERROR_OK)
			return retval;
		return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
	}

	return retval;
}

static int aarch64_write_memory(struct target *target, target_addr_t address,
	uint32_t size, uint32_t count, const uint8_t *buffer)
{
	int mmu_enabled = 0;
	int retval;

	/* aarch64 handles unaligned memory access */
	LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
		"; count %" PRId32, address, size, count);

	/* determine if MMU was enabled on target stop */
	retval = aarch64_mmu(target, &mmu_enabled);
	if (retval != ERROR_OK)
		return retval;

	if (mmu_enabled) {
		retval = aarch64_check_address(target, address);
		if (retval != ERROR_OK)
			return retval;
		/* enable MMU as we could have disabled it for phys access */
		retval = aarch64_mmu_modify(target, 1);
		if (retval != ERROR_OK)
			return retval;
	}
	return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
}
1764
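/*
 * Illustrative usage sketch: callers normally go through the generic target
 * API, which dispatches to the four routines above. A physical access
 * transparently turns the MMU off for its duration; a virtual access turns
 * it back on first. The address below is hypothetical.
 */
#if 0
uint8_t buf[16];
if (target_read_phys_memory(target, 0x80000000ull, 4, 4, buf) != ERROR_OK)
	LOG_ERROR("physical read failed");
#endif
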
static int aarch64_handle_target_request(void *priv)
{
	struct target *target = priv;
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;

	if (!target_was_examined(target))
		return ERROR_OK;
	if (!target->dbg_msg_enabled)
		return ERROR_OK;

	if (target->state == TARGET_RUNNING) {
		uint32_t request;
		uint32_t dscr;
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

		/* check if we have data */
		while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DTRTX, &request);
			if (retval == ERROR_OK) {
				target_request(target, request);
				retval = mem_ap_read_atomic_u32(armv8->debug_ap,
						armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			}
		}
	}

	return ERROR_OK;
}

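/*
 * Target-side counterpart, for illustration only: firmware posts a request
 * by writing DBGDTRTX_EL0, which sets the DSCR TXfull flag polled by the
 * loop above. A minimal sketch assuming GCC-style inline asm and that the
 * debug comms channel is accessible at the current exception level.
 */
#if 0
static inline void dcc_write_example(uint32_t request)
{
	uint64_t status;

	/* wait for the debugger to drain the previous word (TXfull, bit 29) */
	do {
		__asm__ volatile("mrs %0, mdccsr_el0" : "=r" (status));
	} while (status & (1ul << 29));

	__asm__ volatile("msr dbgdtrtx_el0, %0" : : "r" ((uint64_t)request));
}
#endif
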
static int aarch64_examine_first(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct adiv5_dap *swjdp = armv8->arm.dap;
	int i;
	int retval = ERROR_OK;
	uint64_t debug, ttypr;
	uint32_t cpuid;
	uint32_t tmp0, tmp1;
	debug = ttypr = cpuid = 0;

	/* Make sure the debug port is initialized before any AP access */
	retval = dap_dp_init(swjdp);
	if (retval != ERROR_OK)
		return retval;

	/* Search for the APB-AP - it is needed for access to debug registers */
	retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not find APB-AP for debug access");
		return retval;
	}

	retval = mem_ap_init(armv8->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not initialize the APB-AP");
		return retval;
	}

	armv8->debug_ap->memaccess_tck = 80;

	if (!target->dbgbase_set) {
		uint32_t dbgbase;
		/* Get ROM Table base */
		uint32_t apid;
		int32_t coreidx = target->coreid;
		retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
		if (retval != ERROR_OK)
			return retval;
		/* Lookup the CoreSight component with devtype 0x15 (processor debug) */
		retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
				&armv8->debug_base, &coreidx);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
			" apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
	} else
		armv8->debug_base = target->dbgbase;

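	/* Unlock the core's debug registers: 0xC5ACCE55 is the CoreSight
	 * Lock Access Register key that enables memory-mapped access */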
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Unlocking debug access failed");
		return retval;
	}

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "CPUID");
		return retval;
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
	retval += mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "Memory Model Type");
		return retval;
	}
	ttypr |= tmp1;
	ttypr = (ttypr << 32) | tmp0;

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
	retval += mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
		return retval;
	}
	debug |= tmp1;
	debug = (debug << 32) | tmp0;

	LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
	LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
	LOG_DEBUG("debug = 0x%08" PRIx64, debug);

	if (target->ctibase == 0) {
		/* assume a v8 ROM table layout: CTI at debug base + 64 KiB */
		armv8->cti_base = target->ctibase = armv8->debug_base + 0x10000;
		LOG_INFO("Target ctibase is not set, assuming 0x%08" PRIx32, target->ctibase);
	} else
		armv8->cti_base = target->ctibase;

	armv8->arm.core_type = ARM_MODE_MON;
	retval = aarch64_dpm_setup(aarch64, debug);
	if (retval != ERROR_OK)
		return retval;

	/* Setup Breakpoint Register Pairs */
	aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
	aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
	aarch64->brp_num_available = aarch64->brp_num;
	aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
	if (aarch64->brp_list == NULL) {
		LOG_ERROR("out of memory");
		return ERROR_FAIL;
	}
	for (i = 0; i < aarch64->brp_num; i++) {
		aarch64->brp_list[i].used = 0;
		if (i < (aarch64->brp_num - aarch64->brp_num_context))
			aarch64->brp_list[i].type = BRP_NORMAL;
		else
			aarch64->brp_list[i].type = BRP_CONTEXT;
		aarch64->brp_list[i].value = 0;
		aarch64->brp_list[i].control = 0;
		aarch64->brp_list[i].BRPn = i;
	}
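
	/*
	 * Worked example: a Cortex-A57, for instance, reports
	 * ID_AA64DFR0_EL1 = 0x10305106, giving brp_num = ((debug >> 12) & 0xf)
	 * + 1 = 6 pairs, of which brp_num_context = ((debug >> 28) & 0xf) + 1
	 * = 2 are context-aware; the loop above then marks entries 0..3 as
	 * BRP_NORMAL and 4..5 as BRP_CONTEXT.
	 */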

	LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);

	target_set_examined(target);
	return ERROR_OK;
}

static int aarch64_examine(struct target *target)
{
	int retval = ERROR_OK;

	/* don't re-probe hardware after each reset */
	if (!target_was_examined(target))
		retval = aarch64_examine_first(target);

	/* Configure core debug access */
	if (retval == ERROR_OK)
		retval = aarch64_init_debug_access(target);

	return retval;
}

/*
 * AArch64 target creation and initialization
 */

static int aarch64_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}

static int aarch64_init_arch_info(struct target *target,
	struct aarch64_common *aarch64, struct jtag_tap *tap)
{
	struct armv8_common *armv8 = &aarch64->armv8_common;

	/* Setup struct aarch64_common */
	aarch64->common_magic = AARCH64_COMMON_MAGIC;
	/* if the TAP has no DAP yet, create one */
	if (!tap->dap) {
		tap->dap = dap_init();

		/* Leave (only) generic DAP stuff for debugport_init() */
		tap->dap->tap = tap;
	}

	armv8->arm.dap = tap->dap;

	aarch64->fast_reg_read = 0;

	/* register arch-specific functions */
	armv8->examine_debug_reason = NULL;

	armv8->post_debug_entry = aarch64_post_debug_entry;

	armv8->pre_restore_context = NULL;

	armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;

	/* REVISIT: v8 setup should be in a v8-specific routine */
	armv8_init_arch_info(target, armv8);
	target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);

	return ERROR_OK;
}

static int aarch64_target_create(struct target *target, Jim_Interp *interp)
{
	struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));

	if (aarch64 == NULL)
		return ERROR_FAIL;

	return aarch64_init_arch_info(target, aarch64, target->tap);
}

static int aarch64_mmu(struct target *target, int *enabled)
{
	if (target->state != TARGET_HALTED) {
		LOG_ERROR("%s: target not halted", __func__);
		return ERROR_TARGET_INVALID;
	}

	*enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
	return ERROR_OK;
}

static int aarch64_virt2phys(struct target *target, target_addr_t virt,
	target_addr_t *phys)
{
	return armv8_mmu_translate_va_pa(target, virt, phys, 1);
}

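/*
 * Illustrative usage sketch only: translating a virtual address through the
 * routine above. The kernel virtual address is hypothetical.
 */
#if 0
static void virt2phys_usage_example(struct target *target)
{
	target_addr_t phys;

	if (aarch64_virt2phys(target, 0xffffff8008080000ull, &phys) == ERROR_OK)
		LOG_DEBUG("translated to 0x%" TARGET_PRIxADDR, phys);
}
#endif
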
COMMAND_HANDLER(aarch64_handle_cache_info_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct armv8_common *armv8 = target_to_armv8(target);

	return armv8_handle_cache_info_command(CMD_CTX,
			&armv8->armv8_mmu.armv8_cache);
}

COMMAND_HANDLER(aarch64_handle_dbginit_command)
{
	struct target *target = get_current_target(CMD_CTX);
	if (!target_was_examined(target)) {
		LOG_ERROR("target not examined yet");
		return ERROR_FAIL;
	}

	return aarch64_init_debug_access(target);
}

COMMAND_HANDLER(aarch64_handle_smp_off_command)
{
	struct target *target = get_current_target(CMD_CTX);
	/* check that the target is an SMP target */
	struct target_list *head;
	struct target *curr;
	head = target->head;
	target->smp = 0;
	if (head != NULL) {
		while (head != NULL) {
			curr = head->target;
			curr->smp = 0;
			head = head->next;
		}
		/* fixes the target display to the debugger */
		target->gdb_service->target = target;
	}
	return ERROR_OK;
}

COMMAND_HANDLER(aarch64_handle_smp_on_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct target_list *head;
	struct target *curr;
	head = target->head;
	if (head != NULL) {
		target->smp = 1;
		while (head != NULL) {
			curr = head->target;
			curr->smp = 1;
			head = head->next;
		}
	}
	return ERROR_OK;
}

COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
{
	struct target *target = get_current_target(CMD_CTX);
	int retval = ERROR_OK;
	struct target_list *head;
	head = target->head;
	if (head != NULL) {
		if (CMD_ARGC == 1) {
			int coreid = 0;
			COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
			if (retval != ERROR_OK)
				return retval;
			target->gdb_service->core[1] = coreid;
		}
		command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32,
			target->gdb_service->core[0], target->gdb_service->core[1]);
	}
	return ERROR_OK;
}

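/*
 * Example session (illustrative; this target registers its command group
 * under the name "cortex_a" below):
 *
 *   cortex_a smp_off      ; stop SMP handling, debug cores individually
 *   cortex_a smp_on       ; restore SMP handling
 *   cortex_a smp_gdb 1    ; pin core 1 as the core reported to gdb
 */
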
static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "smp_off",
		.handler = aarch64_handle_smp_off_command,
		.mode = COMMAND_EXEC,
		.help = "Stop smp handling",
		.usage = "",
	},
	{
		.name = "smp_on",
		.handler = aarch64_handle_smp_on_command,
		.mode = COMMAND_EXEC,
		.help = "Restart smp handling",
		.usage = "",
	},
	{
		.name = "smp_gdb",
		.handler = aarch64_handle_smp_gdb_command,
		.mode = COMMAND_EXEC,
		.help = "display/set the current core reported to gdb",
		.usage = "",
	},
	COMMAND_REGISTRATION_DONE
};
static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = armv8_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};

struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};
