aarch64: consolidate sticky error handling
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
33 static int aarch64_poll(struct target *target);
34 static int aarch64_debug_entry(struct target *target);
35 static int aarch64_restore_context(struct target *target, bool bpwp);
36 static int aarch64_set_breakpoint(struct target *target,
37 struct breakpoint *breakpoint, uint8_t matchmode);
38 static int aarch64_set_context_breakpoint(struct target *target,
39 struct breakpoint *breakpoint, uint8_t matchmode);
40 static int aarch64_set_hybrid_breakpoint(struct target *target,
41 struct breakpoint *breakpoint);
42 static int aarch64_unset_breakpoint(struct target *target,
43 struct breakpoint *breakpoint);
44 static int aarch64_mmu(struct target *target, int *enabled);
45 static int aarch64_virt2phys(struct target *target,
46 target_addr_t virt, target_addr_t *phys);
47 static int aarch64_read_apb_ap_memory(struct target *target,
48 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
49
50 static int aarch64_restore_system_control_reg(struct target *target)
51 {
52 int retval = ERROR_OK;
53
54 struct aarch64_common *aarch64 = target_to_aarch64(target);
55 struct armv8_common *armv8 = target_to_armv8(target);
56
57 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
58 aarch64->system_control_reg_curr = aarch64->system_control_reg;
59 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
60
61 switch (armv8->arm.core_mode) {
62 case ARMV8_64_EL0T:
63 case ARMV8_64_EL1T:
64 case ARMV8_64_EL1H:
65 retval = armv8->arm.msr(target, 3, /*op 0*/
66 0, 1, /* op1, op2 */
67 0, 0, /* CRn, CRm */
68 aarch64->system_control_reg);
69 if (retval != ERROR_OK)
70 return retval;
71 break;
72 case ARMV8_64_EL2T:
73 case ARMV8_64_EL2H:
74 retval = armv8->arm.msr(target, 3, /*op 0*/
75 4, 1, /* op1, op2 */
76 0, 0, /* CRn, CRm */
77 aarch64->system_control_reg);
78 if (retval != ERROR_OK)
79 return retval;
80 break;
81 case ARMV8_64_EL3H:
82 case ARMV8_64_EL3T:
83 retval = armv8->arm.msr(target, 3, /*op 0*/
84 6, 1, /* op1, op2 */
85 0, 0, /* CRn, CRm */
86 aarch64->system_control_reg);
87 if (retval != ERROR_OK)
88 return retval;
89 break;
90 default:
91 retval = armv8->arm.mcr(target, 15, 0, 0, 1, 0, aarch64->system_control_reg);
92 if (retval != ERROR_OK)
93 return retval;
94 break;
95 }
96 }
97 return retval;
98 }
99
/* check address before aarch64_apb read write access with mmu on
 * remove apb predictible data abort */
static int aarch64_check_address(struct target *target, uint32_t address)
{
	/* TODO: validate that 'address' can be translated with the MMU
	 * enabled so APB accesses do not fault.  Currently a no-op that
	 * always reports success; both parameters are unused. */
	return ERROR_OK;
}
107 /* modify system_control_reg in order to enable or disable mmu for :
108 * - virt2phys address conversion
109 * - read or write memory in phys or virt address */
110 static int aarch64_mmu_modify(struct target *target, int enable)
111 {
112 struct aarch64_common *aarch64 = target_to_aarch64(target);
113 struct armv8_common *armv8 = &aarch64->armv8_common;
114 int retval = ERROR_OK;
115
116 if (enable) {
117 /* if mmu enabled at target stop and mmu not enable */
118 if (!(aarch64->system_control_reg & 0x1U)) {
119 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
120 return ERROR_FAIL;
121 }
122 if (!(aarch64->system_control_reg_curr & 0x1U)) {
123 aarch64->system_control_reg_curr |= 0x1U;
124 switch (armv8->arm.core_mode) {
125 case ARMV8_64_EL0T:
126 case ARMV8_64_EL1T:
127 case ARMV8_64_EL1H:
128 retval = armv8->arm.msr(target, 3, /*op 0*/
129 0, 0, /* op1, op2 */
130 1, 0, /* CRn, CRm */
131 aarch64->system_control_reg_curr);
132 if (retval != ERROR_OK)
133 return retval;
134 break;
135 case ARMV8_64_EL2T:
136 case ARMV8_64_EL2H:
137 retval = armv8->arm.msr(target, 3, /*op 0*/
138 4, 0, /* op1, op2 */
139 1, 0, /* CRn, CRm */
140 aarch64->system_control_reg_curr);
141 if (retval != ERROR_OK)
142 return retval;
143 break;
144 case ARMV8_64_EL3H:
145 case ARMV8_64_EL3T:
146 retval = armv8->arm.msr(target, 3, /*op 0*/
147 6, 0, /* op1, op2 */
148 1, 0, /* CRn, CRm */
149 aarch64->system_control_reg_curr);
150 if (retval != ERROR_OK)
151 return retval;
152 break;
153 default:
154 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
155 }
156 }
157 } else {
158 if (aarch64->system_control_reg_curr & 0x4U) {
159 /* data cache is active */
160 aarch64->system_control_reg_curr &= ~0x4U;
161 /* flush data cache armv7 function to be called */
162 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
163 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
164 }
165 if ((aarch64->system_control_reg_curr & 0x1U)) {
166 aarch64->system_control_reg_curr &= ~0x1U;
167 switch (armv8->arm.core_mode) {
168 case ARMV8_64_EL0T:
169 case ARMV8_64_EL1T:
170 case ARMV8_64_EL1H:
171 retval = armv8->arm.msr(target, 3, /*op 0*/
172 0, 0, /* op1, op2 */
173 1, 0, /* CRn, CRm */
174 aarch64->system_control_reg_curr);
175 if (retval != ERROR_OK)
176 return retval;
177 break;
178 case ARMV8_64_EL2T:
179 case ARMV8_64_EL2H:
180 retval = armv8->arm.msr(target, 3, /*op 0*/
181 4, 0, /* op1, op2 */
182 1, 0, /* CRn, CRm */
183 aarch64->system_control_reg_curr);
184 if (retval != ERROR_OK)
185 return retval;
186 break;
187 case ARMV8_64_EL3H:
188 case ARMV8_64_EL3T:
189 retval = armv8->arm.msr(target, 3, /*op 0*/
190 6, 0, /* op1, op2 */
191 1, 0, /* CRn, CRm */
192 aarch64->system_control_reg_curr);
193 if (retval != ERROR_OK)
194 return retval;
195 break;
196 default:
197 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
198 break;
199 }
200 }
201 }
202 return retval;
203 }
204
205 /*
206 * Basic debug access, very low level assumes state is saved
207 */
208 static int aarch64_init_debug_access(struct target *target)
209 {
210 struct armv8_common *armv8 = target_to_armv8(target);
211 int retval;
212 uint32_t dummy;
213
214 LOG_DEBUG(" ");
215
216 /* Clear Sticky Power Down status Bit in PRSR to enable access to
217 the registers in the Core Power Domain */
218 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
219 armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
220 if (retval != ERROR_OK)
221 return retval;
222
223 /*
224 * Static CTI configuration:
225 * Channel 0 -> trigger outputs HALT request to PE
226 * Channel 1 -> trigger outputs Resume request to PE
227 * Gate all channel trigger events from entering the CTM
228 */
229
230 /* Enable CTI */
231 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
232 armv8->cti_base + CTI_CTR, 1);
233 /* By default, gate all channel triggers to and from the CTM */
234 if (retval == ERROR_OK)
235 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
236 armv8->cti_base + CTI_GATE, 0);
237 /* output halt requests to PE on channel 0 trigger */
238 if (retval == ERROR_OK)
239 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
240 armv8->cti_base + CTI_OUTEN0, CTI_CHNL(0));
241 /* output restart requests to PE on channel 1 trigger */
242 if (retval == ERROR_OK)
243 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
244 armv8->cti_base + CTI_OUTEN1, CTI_CHNL(1));
245 if (retval != ERROR_OK)
246 return retval;
247
248 /* Resync breakpoint registers */
249
250 /* Since this is likely called from init or reset, update target state information*/
251 return aarch64_poll(target);
252 }
253
254 /* Write to memory mapped registers directly with no cache or mmu handling */
255 static int aarch64_dap_write_memap_register_u32(struct target *target,
256 uint32_t address,
257 uint32_t value)
258 {
259 int retval;
260 struct armv8_common *armv8 = target_to_armv8(target);
261
262 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
263
264 return retval;
265 }
266
267 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
268 {
269 struct arm_dpm *dpm = &a8->armv8_common.dpm;
270 int retval;
271
272 dpm->arm = &a8->armv8_common.arm;
273 dpm->didr = debug;
274
275 retval = armv8_dpm_setup(dpm);
276 if (retval == ERROR_OK)
277 retval = armv8_dpm_initialize(dpm);
278
279 return retval;
280 }
281
282 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
283 {
284 struct armv8_common *armv8 = target_to_armv8(target);
285 uint32_t dscr;
286
287 /* Read DSCR */
288 int retval = mem_ap_read_atomic_u32(armv8->debug_ap,
289 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
290 if (ERROR_OK != retval)
291 return retval;
292
293 /* clear bitfield */
294 dscr &= ~bit_mask;
295 /* put new value */
296 dscr |= value & bit_mask;
297
298 /* write new DSCR */
299 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
300 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
301 return retval;
302 }
303
304 static struct target *get_aarch64(struct target *target, int32_t coreid)
305 {
306 struct target_list *head;
307 struct target *curr;
308
309 head = target->head;
310 while (head != (struct target_list *)NULL) {
311 curr = head->target;
312 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
313 return curr;
314 head = head->next;
315 }
316 return target;
317 }
318 static int aarch64_halt(struct target *target);
319
320 static int aarch64_halt_smp(struct target *target)
321 {
322 int retval = ERROR_OK;
323 struct target_list *head = target->head;
324
325 while (head != (struct target_list *)NULL) {
326 struct target *curr = head->target;
327 struct armv8_common *armv8 = target_to_armv8(curr);
328
329 /* open the gate for channel 0 to let HALT requests pass to the CTM */
330 if (curr->smp) {
331 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
332 armv8->cti_base + CTI_GATE, CTI_CHNL(0));
333 if (retval == ERROR_OK)
334 retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
335 }
336 if (retval != ERROR_OK)
337 break;
338
339 head = head->next;
340 }
341
342 /* halt the target PE */
343 if (retval == ERROR_OK)
344 retval = aarch64_halt(target);
345
346 return retval;
347 }
348
349 static int update_halt_gdb(struct target *target)
350 {
351 int retval = 0;
352 if (target->gdb_service && target->gdb_service->core[0] == -1) {
353 target->gdb_service->target = target;
354 target->gdb_service->core[0] = target->coreid;
355 retval += aarch64_halt_smp(target);
356 }
357 return retval;
358 }
359
/*
 * Cortex-A8 Run control
 */

/*
 * Poll DSCR and track halted/running transitions.
 *
 * On a running -> halted transition this runs the debug-entry sequence
 * and fires TARGET_EVENT_HALTED (or TARGET_EVENT_DEBUG_HALTED when the
 * target was debug-running).  In SMP mode it also services the gdb
 * core-switch protocol described in the comment below.
 */
static int aarch64_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_aarch64(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for aarch64_debug_entry() */
	aarch64->cpudbg_dscr = dscr;

	/* run-mode 0x3 is treated as "halted in debug state" here */
	if (DSCR_RUN_MODE(dscr) == 0x3) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
429
430 static int aarch64_halt(struct target *target)
431 {
432 int retval = ERROR_OK;
433 uint32_t dscr;
434 struct armv8_common *armv8 = target_to_armv8(target);
435
436 /*
437 * add HDE in halting debug mode
438 */
439 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
440 if (retval != ERROR_OK)
441 return retval;
442
443 /* trigger an event on channel 0, this outputs a halt request to the PE */
444 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
445 armv8->cti_base + CTI_APPPULSE, CTI_CHNL(0));
446 if (retval != ERROR_OK)
447 return retval;
448
449 long long then = timeval_ms();
450 for (;; ) {
451 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
452 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
453 if (retval != ERROR_OK)
454 return retval;
455 if ((dscr & DSCRV8_HALT_MASK) != 0)
456 break;
457 if (timeval_ms() > then + 1000) {
458 LOG_ERROR("Timeout waiting for halt");
459 return ERROR_FAIL;
460 }
461 }
462
463 target->debug_reason = DBG_REASON_DBGRQ;
464
465 return ERROR_OK;
466 }
467
/*
 * Prepare a core for leaving debug state.
 *
 * current = 1: resume at the current PC, otherwise resume at *address;
 * in either case *address is updated to the PC actually used.  The PC
 * is fixed up per core state, SCTLR is restored first (it clobbers a
 * core register — see comment below), then all dirty registers are
 * flushed back to the core.  handle_breakpoints is forwarded to the
 * context restore as the bpwp flag.
 */
static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		/* A32: instructions are 4-byte aligned */
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		/* A64: instructions are 4-byte aligned */
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
520
/*
 * Restart a core via the CTI.
 *
 * Acknowledges the pending halt trigger, opens the CTI gate for
 * channel 1 so restart events reach all PEs, and — unless slave_pe is
 * set — pulses channel 1 and waits (up to 1 s) for the core to leave
 * debug state.  slave PEs are restarted by the master's pulse through
 * the shared gate.  On success marks the target running and
 * invalidates the register caches.
 */
static int aarch64_internal_restart(struct target *target, bool slave_pe)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* sanity-check the DSCR state before leaving debug; these are only
	 * warnings, the restart is attempted regardless */
	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* make sure to acknowledge the halt event before resuming */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
		armv8->cti_base + CTI_INACK, CTI_TRIG(HALT));

	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs
	 */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_GATE, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	if (!slave_pe) {
		/* trigger an event on channel 1, generates a restart request to the PE */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_APPPULSE, CTI_CHNL(1));
		if (retval != ERROR_OK)
			return retval;

		long long then = timeval_ms();
		for (;; ) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			if (retval != ERROR_OK)
				return retval;
			/* NOTE(review): the loop exits when DSCR_HDE reads as
			 * set — confirm this is the intended "core restarted"
			 * indication rather than a status bit like RW/ITE */
			if ((dscr & DSCR_HDE) != 0)
				break;
			if (timeval_ms() > then + 1000) {
				LOG_ERROR("Timeout waiting for resume");
				return ERROR_FAIL;
			}
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);
	register_cache_invalidate(arm->core_cache->next);

	return ERROR_OK;
}
590
591 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
592 {
593 int retval = 0;
594 struct target_list *head;
595 struct target *curr;
596 uint64_t address;
597 head = target->head;
598 while (head != (struct target_list *)NULL) {
599 curr = head->target;
600 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
601 /* resume current address , not in step mode */
602 retval += aarch64_internal_restore(curr, 1, &address,
603 handle_breakpoints, 0);
604 retval += aarch64_internal_restart(curr, true);
605 }
606 head = head->next;
607
608 }
609 return retval;
610 }
611
612 static int aarch64_resume(struct target *target, int current,
613 target_addr_t address, int handle_breakpoints, int debug_execution)
614 {
615 int retval = 0;
616 uint64_t addr = address;
617
618 /* dummy resume for smp toggle in order to reduce gdb impact */
619 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
620 /* simulate a start and halt of target */
621 target->gdb_service->target = NULL;
622 target->gdb_service->core[0] = target->gdb_service->core[1];
623 /* fake resume at next poll we play the target core[1], see poll*/
624 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
625 return 0;
626 }
627 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
628 debug_execution);
629 if (target->smp) {
630 target->gdb_service->core[0] = -1;
631 retval = aarch64_restore_smp(target, handle_breakpoints);
632 if (retval != ERROR_OK)
633 return retval;
634 }
635 aarch64_internal_restart(target, false);
636
637 if (!debug_execution) {
638 target->state = TARGET_RUNNING;
639 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
640 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
641 } else {
642 target->state = TARGET_DEBUG_RUNNING;
643 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
644 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
645 }
646
647 return ERROR_OK;
648 }
649
/*
 * Run the debug-entry sequence after the core halted.
 *
 * Selects the opcode/register-access scheme for the detected core
 * state, clears all sticky errors (DRCR.CSE), discards pending async
 * exceptions, determines the debug reason from the cached DSCR, reads
 * back the watchpoint fault address if needed, and finally reads the
 * full register set and invokes the post-debug-entry hook.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), aarch64->cpudbg_dscr);

	/* cpudbg_dscr was cached by aarch64_poll() */
	dpm->dscr = aarch64->cpudbg_dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);

	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);

	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, aarch64->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		/* WFAR is 64 bit: read the high word, shift it up, then
		 * OR in the low word */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_WFAR1,
			&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_WFAR0,
			&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
707
/*
 * Per-core hook invoked after debug entry.
 *
 * Reads the system control register (SCTLR) with the MRS encoding
 * matching the current exception level (cp15 MRC for AArch32 SVC),
 * caches it, identifies caches/MPIDR on first entry, and mirrors the
 * MMU/cache enable bits into the armv8_mmu state.
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;

	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* switch to EL1H so the system control register can be
		 * read, then fall through to the EL1 access */
		armv8_dpm_modeswitch(&armv8->dpm, ARMV8_64_EL1H);
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		retval = armv8->arm.mrs(target, 3, /*op 0*/
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				&aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		retval = armv8->arm.mrs(target, 3, /*op 0*/
				4, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				&aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		retval = armv8->arm.mrs(target, 3, /*op 0*/
				6, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				&aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;

	case ARM_MODE_SVC:
		/* AArch32: read SCTLR via coprocessor 15 */
		retval = armv8->arm.mrc(target, 15, 0, 0, 1, 0, &aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;

	default:
		LOG_INFO("cannot read system control register in this mode");
		break;
	}

	/* undo any temporary mode switch performed above */
	armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* identify caches and read MPIDR only once per session */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	/* mirror the SCTLR enable bits (M, C, I) into the MMU state */
	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
		(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
		(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	aarch64->curr_mode = armv8->arm.core_mode;
	return ERROR_OK;
}
776
777 static int aarch64_step(struct target *target, int current, target_addr_t address,
778 int handle_breakpoints)
779 {
780 struct armv8_common *armv8 = target_to_armv8(target);
781 int retval;
782 uint32_t edecr;
783
784 if (target->state != TARGET_HALTED) {
785 LOG_WARNING("target not halted");
786 return ERROR_TARGET_NOT_HALTED;
787 }
788
789 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
790 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
791 if (retval != ERROR_OK)
792 return retval;
793
794 /* make sure EDECR.SS is not set when restoring the register */
795 edecr &= ~0x4;
796
797 /* set EDECR.SS to enter hardware step mode */
798 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
799 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
800 if (retval != ERROR_OK)
801 return retval;
802
803 /* disable interrupts while stepping */
804 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
805 if (retval != ERROR_OK)
806 return ERROR_OK;
807
808 /* resume the target */
809 retval = aarch64_resume(target, current, address, 0, 0);
810 if (retval != ERROR_OK)
811 return retval;
812
813 long long then = timeval_ms();
814 while (target->state != TARGET_HALTED) {
815 retval = aarch64_poll(target);
816 if (retval != ERROR_OK)
817 return retval;
818 if (timeval_ms() > then + 1000) {
819 LOG_ERROR("timeout waiting for target halt");
820 return ERROR_FAIL;
821 }
822 }
823
824 /* restore EDECR */
825 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
826 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
827 if (retval != ERROR_OK)
828 return retval;
829
830 /* restore interrupts */
831 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
832 if (retval != ERROR_OK)
833 return ERROR_OK;
834
835 return ERROR_OK;
836 }
837
838 static int aarch64_restore_context(struct target *target, bool bpwp)
839 {
840 struct armv8_common *armv8 = target_to_armv8(target);
841
842 LOG_DEBUG(" ");
843
844 if (armv8->pre_restore_context)
845 armv8->pre_restore_context(target);
846
847 return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
848
849 }
850
851 /*
852 * Cortex-A8 Breakpoint and watchpoint functions
853 */
854
855 /* Setup hardware Breakpoint Register Pair */
856 static int aarch64_set_breakpoint(struct target *target,
857 struct breakpoint *breakpoint, uint8_t matchmode)
858 {
859 int retval;
860 int brp_i = 0;
861 uint32_t control;
862 uint8_t byte_addr_select = 0x0F;
863 struct aarch64_common *aarch64 = target_to_aarch64(target);
864 struct armv8_common *armv8 = &aarch64->armv8_common;
865 struct aarch64_brp *brp_list = aarch64->brp_list;
866
867 if (breakpoint->set) {
868 LOG_WARNING("breakpoint already set");
869 return ERROR_OK;
870 }
871
872 if (breakpoint->type == BKPT_HARD) {
873 int64_t bpt_value;
874 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
875 brp_i++;
876 if (brp_i >= aarch64->brp_num) {
877 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
878 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
879 }
880 breakpoint->set = brp_i + 1;
881 if (breakpoint->length == 2)
882 byte_addr_select = (3 << (breakpoint->address & 0x02));
883 control = ((matchmode & 0x7) << 20)
884 | (1 << 13)
885 | (byte_addr_select << 5)
886 | (3 << 1) | 1;
887 brp_list[brp_i].used = 1;
888 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
889 brp_list[brp_i].control = control;
890 bpt_value = brp_list[brp_i].value;
891
892 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
893 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
894 (uint32_t)(bpt_value & 0xFFFFFFFF));
895 if (retval != ERROR_OK)
896 return retval;
897 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
898 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
899 (uint32_t)(bpt_value >> 32));
900 if (retval != ERROR_OK)
901 return retval;
902
903 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
904 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
905 brp_list[brp_i].control);
906 if (retval != ERROR_OK)
907 return retval;
908 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
909 brp_list[brp_i].control,
910 brp_list[brp_i].value);
911
912 } else if (breakpoint->type == BKPT_SOFT) {
913 uint8_t code[4];
914
915 buf_set_u32(code, 0, 32, ARMV8_HLT(0x11));
916 retval = target_read_memory(target,
917 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
918 breakpoint->length, 1,
919 breakpoint->orig_instr);
920 if (retval != ERROR_OK)
921 return retval;
922
923 armv8_cache_d_inner_flush_virt(armv8,
924 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
925 breakpoint->length);
926
927 retval = target_write_memory(target,
928 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
929 breakpoint->length, 1, code);
930 if (retval != ERROR_OK)
931 return retval;
932
933 armv8_cache_d_inner_flush_virt(armv8,
934 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
935 breakpoint->length);
936
937 armv8_cache_i_inner_inval_virt(armv8,
938 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
939 breakpoint->length);
940
941 breakpoint->set = 0x11; /* Any nice value but 0 */
942 }
943
944 /* Ensure that halting debug mode is enable */
945 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
946 if (retval != ERROR_OK) {
947 LOG_DEBUG("Failed to set DSCR.HDE");
948 return retval;
949 }
950
951 return ERROR_OK;
952 }
953
954 static int aarch64_set_context_breakpoint(struct target *target,
955 struct breakpoint *breakpoint, uint8_t matchmode)
956 {
957 int retval = ERROR_FAIL;
958 int brp_i = 0;
959 uint32_t control;
960 uint8_t byte_addr_select = 0x0F;
961 struct aarch64_common *aarch64 = target_to_aarch64(target);
962 struct armv8_common *armv8 = &aarch64->armv8_common;
963 struct aarch64_brp *brp_list = aarch64->brp_list;
964
965 if (breakpoint->set) {
966 LOG_WARNING("breakpoint already set");
967 return retval;
968 }
969 /*check available context BRPs*/
970 while ((brp_list[brp_i].used ||
971 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
972 brp_i++;
973
974 if (brp_i >= aarch64->brp_num) {
975 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
976 return ERROR_FAIL;
977 }
978
979 breakpoint->set = brp_i + 1;
980 control = ((matchmode & 0x7) << 20)
981 | (1 << 13)
982 | (byte_addr_select << 5)
983 | (3 << 1) | 1;
984 brp_list[brp_i].used = 1;
985 brp_list[brp_i].value = (breakpoint->asid);
986 brp_list[brp_i].control = control;
987 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
988 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
989 brp_list[brp_i].value);
990 if (retval != ERROR_OK)
991 return retval;
992 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
993 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
994 brp_list[brp_i].control);
995 if (retval != ERROR_OK)
996 return retval;
997 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
998 brp_list[brp_i].control,
999 brp_list[brp_i].value);
1000 return ERROR_OK;
1001
1002 }
1003
1004 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1005 {
1006 int retval = ERROR_FAIL;
1007 int brp_1 = 0; /* holds the contextID pair */
1008 int brp_2 = 0; /* holds the IVA pair */
1009 uint32_t control_CTX, control_IVA;
1010 uint8_t CTX_byte_addr_select = 0x0F;
1011 uint8_t IVA_byte_addr_select = 0x0F;
1012 uint8_t CTX_machmode = 0x03;
1013 uint8_t IVA_machmode = 0x01;
1014 struct aarch64_common *aarch64 = target_to_aarch64(target);
1015 struct armv8_common *armv8 = &aarch64->armv8_common;
1016 struct aarch64_brp *brp_list = aarch64->brp_list;
1017
1018 if (breakpoint->set) {
1019 LOG_WARNING("breakpoint already set");
1020 return retval;
1021 }
1022 /*check available context BRPs*/
1023 while ((brp_list[brp_1].used ||
1024 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1025 brp_1++;
1026
1027 printf("brp(CTX) found num: %d\n", brp_1);
1028 if (brp_1 >= aarch64->brp_num) {
1029 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1030 return ERROR_FAIL;
1031 }
1032
1033 while ((brp_list[brp_2].used ||
1034 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1035 brp_2++;
1036
1037 printf("brp(IVA) found num: %d\n", brp_2);
1038 if (brp_2 >= aarch64->brp_num) {
1039 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1040 return ERROR_FAIL;
1041 }
1042
1043 breakpoint->set = brp_1 + 1;
1044 breakpoint->linked_BRP = brp_2;
1045 control_CTX = ((CTX_machmode & 0x7) << 20)
1046 | (brp_2 << 16)
1047 | (0 << 14)
1048 | (CTX_byte_addr_select << 5)
1049 | (3 << 1) | 1;
1050 brp_list[brp_1].used = 1;
1051 brp_list[brp_1].value = (breakpoint->asid);
1052 brp_list[brp_1].control = control_CTX;
1053 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1054 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1055 brp_list[brp_1].value);
1056 if (retval != ERROR_OK)
1057 return retval;
1058 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1059 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1060 brp_list[brp_1].control);
1061 if (retval != ERROR_OK)
1062 return retval;
1063
1064 control_IVA = ((IVA_machmode & 0x7) << 20)
1065 | (brp_1 << 16)
1066 | (1 << 13)
1067 | (IVA_byte_addr_select << 5)
1068 | (3 << 1) | 1;
1069 brp_list[brp_2].used = 1;
1070 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1071 brp_list[brp_2].control = control_IVA;
1072 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1073 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1074 brp_list[brp_2].value & 0xFFFFFFFF);
1075 if (retval != ERROR_OK)
1076 return retval;
1077 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1078 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1079 brp_list[brp_2].value >> 32);
1080 if (retval != ERROR_OK)
1081 return retval;
1082 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1083 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1084 brp_list[brp_2].control);
1085 if (retval != ERROR_OK)
1086 return retval;
1087
1088 return ERROR_OK;
1089 }
1090
1091 static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1092 {
1093 int retval;
1094 struct aarch64_common *aarch64 = target_to_aarch64(target);
1095 struct armv8_common *armv8 = &aarch64->armv8_common;
1096 struct aarch64_brp *brp_list = aarch64->brp_list;
1097
1098 if (!breakpoint->set) {
1099 LOG_WARNING("breakpoint not set");
1100 return ERROR_OK;
1101 }
1102
1103 if (breakpoint->type == BKPT_HARD) {
1104 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1105 int brp_i = breakpoint->set - 1;
1106 int brp_j = breakpoint->linked_BRP;
1107 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1108 LOG_DEBUG("Invalid BRP number in breakpoint");
1109 return ERROR_OK;
1110 }
1111 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1112 brp_list[brp_i].control, brp_list[brp_i].value);
1113 brp_list[brp_i].used = 0;
1114 brp_list[brp_i].value = 0;
1115 brp_list[brp_i].control = 0;
1116 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1117 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1118 brp_list[brp_i].control);
1119 if (retval != ERROR_OK)
1120 return retval;
1121 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1122 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1123 (uint32_t)brp_list[brp_i].value);
1124 if (retval != ERROR_OK)
1125 return retval;
1126 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1127 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1128 (uint32_t)brp_list[brp_i].value);
1129 if (retval != ERROR_OK)
1130 return retval;
1131 if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
1132 LOG_DEBUG("Invalid BRP number in breakpoint");
1133 return ERROR_OK;
1134 }
1135 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
1136 brp_list[brp_j].control, brp_list[brp_j].value);
1137 brp_list[brp_j].used = 0;
1138 brp_list[brp_j].value = 0;
1139 brp_list[brp_j].control = 0;
1140 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1141 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
1142 brp_list[brp_j].control);
1143 if (retval != ERROR_OK)
1144 return retval;
1145 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1146 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
1147 (uint32_t)brp_list[brp_j].value);
1148 if (retval != ERROR_OK)
1149 return retval;
1150 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1151 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
1152 (uint32_t)brp_list[brp_j].value);
1153 if (retval != ERROR_OK)
1154 return retval;
1155
1156 breakpoint->linked_BRP = 0;
1157 breakpoint->set = 0;
1158 return ERROR_OK;
1159
1160 } else {
1161 int brp_i = breakpoint->set - 1;
1162 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1163 LOG_DEBUG("Invalid BRP number in breakpoint");
1164 return ERROR_OK;
1165 }
1166 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
1167 brp_list[brp_i].control, brp_list[brp_i].value);
1168 brp_list[brp_i].used = 0;
1169 brp_list[brp_i].value = 0;
1170 brp_list[brp_i].control = 0;
1171 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1172 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1173 brp_list[brp_i].control);
1174 if (retval != ERROR_OK)
1175 return retval;
1176 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1177 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1178 brp_list[brp_i].value);
1179 if (retval != ERROR_OK)
1180 return retval;
1181
1182 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1183 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1184 (uint32_t)brp_list[brp_i].value);
1185 if (retval != ERROR_OK)
1186 return retval;
1187 breakpoint->set = 0;
1188 return ERROR_OK;
1189 }
1190 } else {
1191 /* restore original instruction (kept in target endianness) */
1192
1193 armv8_cache_d_inner_flush_virt(armv8,
1194 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1195 breakpoint->length);
1196
1197 if (breakpoint->length == 4) {
1198 retval = target_write_memory(target,
1199 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1200 4, 1, breakpoint->orig_instr);
1201 if (retval != ERROR_OK)
1202 return retval;
1203 } else {
1204 retval = target_write_memory(target,
1205 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1206 2, 1, breakpoint->orig_instr);
1207 if (retval != ERROR_OK)
1208 return retval;
1209 }
1210
1211 armv8_cache_d_inner_flush_virt(armv8,
1212 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1213 breakpoint->length);
1214
1215 armv8_cache_i_inner_inval_virt(armv8,
1216 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1217 breakpoint->length);
1218 }
1219 breakpoint->set = 0;
1220
1221 return ERROR_OK;
1222 }
1223
1224 static int aarch64_add_breakpoint(struct target *target,
1225 struct breakpoint *breakpoint)
1226 {
1227 struct aarch64_common *aarch64 = target_to_aarch64(target);
1228
1229 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1230 LOG_INFO("no hardware breakpoint available");
1231 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1232 }
1233
1234 if (breakpoint->type == BKPT_HARD)
1235 aarch64->brp_num_available--;
1236
1237 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1238 }
1239
1240 static int aarch64_add_context_breakpoint(struct target *target,
1241 struct breakpoint *breakpoint)
1242 {
1243 struct aarch64_common *aarch64 = target_to_aarch64(target);
1244
1245 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1246 LOG_INFO("no hardware breakpoint available");
1247 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1248 }
1249
1250 if (breakpoint->type == BKPT_HARD)
1251 aarch64->brp_num_available--;
1252
1253 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1254 }
1255
1256 static int aarch64_add_hybrid_breakpoint(struct target *target,
1257 struct breakpoint *breakpoint)
1258 {
1259 struct aarch64_common *aarch64 = target_to_aarch64(target);
1260
1261 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1262 LOG_INFO("no hardware breakpoint available");
1263 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1264 }
1265
1266 if (breakpoint->type == BKPT_HARD)
1267 aarch64->brp_num_available--;
1268
1269 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1270 }
1271
1272
1273 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1274 {
1275 struct aarch64_common *aarch64 = target_to_aarch64(target);
1276
1277 #if 0
1278 /* It is perfectly possible to remove breakpoints while the target is running */
1279 if (target->state != TARGET_HALTED) {
1280 LOG_WARNING("target not halted");
1281 return ERROR_TARGET_NOT_HALTED;
1282 }
1283 #endif
1284
1285 if (breakpoint->set) {
1286 aarch64_unset_breakpoint(target, breakpoint);
1287 if (breakpoint->type == BKPT_HARD)
1288 aarch64->brp_num_available++;
1289 }
1290
1291 return ERROR_OK;
1292 }
1293
1294 /*
1295 * Cortex-A8 Reset functions
1296 */
1297
1298 static int aarch64_assert_reset(struct target *target)
1299 {
1300 struct armv8_common *armv8 = target_to_armv8(target);
1301
1302 LOG_DEBUG(" ");
1303
1304 /* FIXME when halt is requested, make it work somehow... */
1305
1306 /* Issue some kind of warm reset. */
1307 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1308 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1309 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1310 /* REVISIT handle "pulls" cases, if there's
1311 * hardware that needs them to work.
1312 */
1313 jtag_add_reset(0, 1);
1314 } else {
1315 LOG_ERROR("%s: how to reset?", target_name(target));
1316 return ERROR_FAIL;
1317 }
1318
1319 /* registers are now invalid */
1320 register_cache_invalidate(armv8->arm.core_cache);
1321
1322 target->state = TARGET_RESET;
1323
1324 return ERROR_OK;
1325 }
1326
1327 static int aarch64_deassert_reset(struct target *target)
1328 {
1329 int retval;
1330
1331 LOG_DEBUG(" ");
1332
1333 /* be certain SRST is off */
1334 jtag_add_reset(0, 0);
1335
1336 retval = aarch64_poll(target);
1337 if (retval != ERROR_OK)
1338 return retval;
1339
1340 if (target->reset_halt) {
1341 if (target->state != TARGET_HALTED) {
1342 LOG_WARNING("%s: ran after reset and before halt ...",
1343 target_name(target));
1344 retval = target_halt(target);
1345 if (retval != ERROR_OK)
1346 return retval;
1347 }
1348 }
1349
1350 return ERROR_OK;
1351 }
1352
1353 static int aarch64_write_apb_ap_memory(struct target *target,
1354 uint64_t address, uint32_t size,
1355 uint32_t count, const uint8_t *buffer)
1356 {
1357 /* write memory through APB-AP */
1358 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1359 struct armv8_common *armv8 = target_to_armv8(target);
1360 struct arm_dpm *dpm = &armv8->dpm;
1361 struct arm *arm = &armv8->arm;
1362 int total_bytes = count * size;
1363 int total_u32;
1364 int start_byte = address & 0x3;
1365 int end_byte = (address + total_bytes) & 0x3;
1366 struct reg *reg;
1367 uint32_t dscr;
1368 uint8_t *tmp_buff = NULL;
1369
1370 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count%" PRIu32,
1371 address, size, count);
1372 if (target->state != TARGET_HALTED) {
1373 LOG_WARNING("target not halted");
1374 return ERROR_TARGET_NOT_HALTED;
1375 }
1376
1377 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1378
1379 /* Mark register R0 as dirty, as it will be used
1380 * for transferring the data.
1381 * It will be restored automatically when exiting
1382 * debug mode
1383 */
1384 reg = armv8_reg_current(arm, 1);
1385 reg->dirty = true;
1386
1387 reg = armv8_reg_current(arm, 0);
1388 reg->dirty = true;
1389
1390 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1391
1392 /* The algorithm only copies 32 bit words, so the buffer
1393 * should be expanded to include the words at either end.
1394 * The first and last words will be read first to avoid
1395 * corruption if needed.
1396 */
1397 tmp_buff = malloc(total_u32 * 4);
1398
1399 if ((start_byte != 0) && (total_u32 > 1)) {
1400 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1401 * the other bytes in the word.
1402 */
1403 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1404 if (retval != ERROR_OK)
1405 goto error_free_buff_w;
1406 }
1407
1408 /* If end of write is not aligned, or the write is less than 4 bytes */
1409 if ((end_byte != 0) ||
1410 ((total_u32 == 1) && (total_bytes != 4))) {
1411
1412 /* Read the last word to avoid corruption during 32 bit write */
1413 int mem_offset = (total_u32-1) * 4;
1414 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1415 if (retval != ERROR_OK)
1416 goto error_free_buff_w;
1417 }
1418
1419 /* Copy the write buffer over the top of the temporary buffer */
1420 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1421
1422 /* We now have a 32 bit aligned buffer that can be written */
1423
1424 /* Read DSCR */
1425 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1426 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1427 if (retval != ERROR_OK)
1428 goto error_free_buff_w;
1429
1430 /* Set Normal access mode */
1431 dscr = (dscr & ~DSCR_MA);
1432 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1433 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1434
1435 if (arm->core_state == ARM_STATE_AARCH64) {
1436 /* Write X0 with value 'address' using write procedure */
1437 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1438 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1439 retval = dpm->instr_write_data_dcc_64(dpm,
1440 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
1441 } else {
1442 /* Write R0 with value 'address' using write procedure */
1443 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1444 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1445 dpm->instr_write_data_dcc(dpm,
1446 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
1447
1448 }
1449 /* Step 1.d - Change DCC to memory mode */
1450 dscr = dscr | DSCR_MA;
1451 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1452 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1453 if (retval != ERROR_OK)
1454 goto error_unset_dtr_w;
1455
1456
1457 /* Step 2.a - Do the write */
1458 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1459 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1460 if (retval != ERROR_OK)
1461 goto error_unset_dtr_w;
1462
1463 /* Step 3.a - Switch DTR mode back to Normal mode */
1464 dscr = (dscr & ~DSCR_MA);
1465 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1466 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1467 if (retval != ERROR_OK)
1468 goto error_unset_dtr_w;
1469
1470 /* Check for sticky abort flags in the DSCR */
1471 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1472 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1473 if (retval != ERROR_OK)
1474 goto error_free_buff_w;
1475
1476 dpm->dscr = dscr;
1477 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1478 /* Abort occurred - clear it and exit */
1479 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1480 armv8_dpm_handle_exception(dpm);
1481 goto error_free_buff_w;
1482 }
1483
1484 /* Done */
1485 free(tmp_buff);
1486 return ERROR_OK;
1487
1488 error_unset_dtr_w:
1489 /* Unset DTR mode */
1490 mem_ap_read_atomic_u32(armv8->debug_ap,
1491 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1492 dscr = (dscr & ~DSCR_MA);
1493 mem_ap_write_atomic_u32(armv8->debug_ap,
1494 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1495 error_free_buff_w:
1496 LOG_ERROR("error");
1497 free(tmp_buff);
1498 return ERROR_FAIL;
1499 }
1500
1501 static int aarch64_read_apb_ap_memory(struct target *target,
1502 target_addr_t address, uint32_t size,
1503 uint32_t count, uint8_t *buffer)
1504 {
1505 /* read memory through APB-AP */
1506 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1507 struct armv8_common *armv8 = target_to_armv8(target);
1508 struct arm_dpm *dpm = &armv8->dpm;
1509 struct arm *arm = &armv8->arm;
1510 int total_bytes = count * size;
1511 int total_u32;
1512 int start_byte = address & 0x3;
1513 int end_byte = (address + total_bytes) & 0x3;
1514 struct reg *reg;
1515 uint32_t dscr;
1516 uint8_t *tmp_buff = NULL;
1517 uint8_t *u8buf_ptr;
1518 uint32_t value;
1519
1520 LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count%" PRIu32,
1521 address, size, count);
1522 if (target->state != TARGET_HALTED) {
1523 LOG_WARNING("target not halted");
1524 return ERROR_TARGET_NOT_HALTED;
1525 }
1526
1527 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1528 /* Mark register X0, X1 as dirty, as it will be used
1529 * for transferring the data.
1530 * It will be restored automatically when exiting
1531 * debug mode
1532 */
1533 reg = armv8_reg_current(arm, 1);
1534 reg->dirty = true;
1535
1536 reg = armv8_reg_current(arm, 0);
1537 reg->dirty = true;
1538
1539 /* Read DSCR */
1540 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1541 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1542
1543 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1544
1545 /* Set Normal access mode */
1546 dscr = (dscr & ~DSCR_MA);
1547 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1548 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1549
1550 if (arm->core_state == ARM_STATE_AARCH64) {
1551 /* Write X0 with value 'address' using write procedure */
1552 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1553 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1554 retval += dpm->instr_write_data_dcc_64(dpm,
1555 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
1556 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1557 retval += dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
1558 /* Step 1.e - Change DCC to memory mode */
1559 dscr = dscr | DSCR_MA;
1560 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1561 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1562 /* Step 1.f - read DBGDTRTX and discard the value */
1563 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1564 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1565 } else {
1566 /* Write R0 with value 'address' using write procedure */
1567 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
1568 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1569 retval += dpm->instr_write_data_dcc(dpm,
1570 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
1571 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1572 retval += dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1573 /* Step 1.e - Change DCC to memory mode */
1574 dscr = dscr | DSCR_MA;
1575 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1576 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1577 /* Step 1.f - read DBGDTRTX and discard the value */
1578 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1579 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1580
1581 }
1582 if (retval != ERROR_OK)
1583 goto error_unset_dtr_r;
1584
1585 /* Optimize the read as much as we can, either way we read in a single pass */
1586 if ((start_byte) || (end_byte)) {
1587 /* The algorithm only copies 32 bit words, so the buffer
1588 * should be expanded to include the words at either end.
1589 * The first and last words will be read into a temp buffer
1590 * to avoid corruption
1591 */
1592 tmp_buff = malloc(total_u32 * 4);
1593 if (!tmp_buff)
1594 goto error_unset_dtr_r;
1595
1596 /* use the tmp buffer to read the entire data */
1597 u8buf_ptr = tmp_buff;
1598 } else
1599 /* address and read length are aligned so read directly into the passed buffer */
1600 u8buf_ptr = buffer;
1601
1602 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1603 * Abort flags are sticky, so can be read at end of transactions
1604 *
1605 * This data is read in aligned to 32 bit boundary.
1606 */
1607
1608 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1609 * increments X0 by 4. */
1610 retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
1611 armv8->debug_base + CPUV8_DBG_DTRTX);
1612 if (retval != ERROR_OK)
1613 goto error_unset_dtr_r;
1614
1615 /* Step 3.a - set DTR access mode back to Normal mode */
1616 dscr = (dscr & ~DSCR_MA);
1617 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1618 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1619 if (retval != ERROR_OK)
1620 goto error_free_buff_r;
1621
1622 /* Step 3.b - read DBGDTRTX for the final value */
1623 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1624 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1625 memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);
1626
1627 /* Check for sticky abort flags in the DSCR */
1628 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1629 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1630 if (retval != ERROR_OK)
1631 goto error_free_buff_r;
1632
1633 dpm->dscr = dscr;
1634
1635 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1636 /* Abort occurred - clear it and exit */
1637 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1638 armv8_dpm_handle_exception(dpm);
1639 goto error_free_buff_r;
1640 }
1641
1642 /* check if we need to copy aligned data by applying any shift necessary */
1643 if (tmp_buff) {
1644 memcpy(buffer, tmp_buff + start_byte, total_bytes);
1645 free(tmp_buff);
1646 }
1647
1648 /* Done */
1649 return ERROR_OK;
1650
1651 error_unset_dtr_r:
1652 /* Unset DTR mode */
1653 mem_ap_read_atomic_u32(armv8->debug_ap,
1654 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1655 dscr = (dscr & ~DSCR_MA);
1656 mem_ap_write_atomic_u32(armv8->debug_ap,
1657 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1658 error_free_buff_r:
1659 LOG_ERROR("error");
1660 free(tmp_buff);
1661 return ERROR_FAIL;
1662 }
1663
1664 static int aarch64_read_phys_memory(struct target *target,
1665 target_addr_t address, uint32_t size,
1666 uint32_t count, uint8_t *buffer)
1667 {
1668 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1669 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
1670 address, size, count);
1671
1672 if (count && buffer) {
1673 /* read memory through APB-AP */
1674 retval = aarch64_mmu_modify(target, 0);
1675 if (retval != ERROR_OK)
1676 return retval;
1677 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1678 }
1679 return retval;
1680 }
1681
1682 static int aarch64_read_memory(struct target *target, target_addr_t address,
1683 uint32_t size, uint32_t count, uint8_t *buffer)
1684 {
1685 int mmu_enabled = 0;
1686 int retval;
1687
1688 /* aarch64 handles unaligned memory access */
1689 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
1690 size, count);
1691
1692 /* determine if MMU was enabled on target stop */
1693 retval = aarch64_mmu(target, &mmu_enabled);
1694 if (retval != ERROR_OK)
1695 return retval;
1696
1697 if (mmu_enabled) {
1698 retval = aarch64_check_address(target, address);
1699 if (retval != ERROR_OK)
1700 return retval;
1701 /* enable MMU as we could have disabled it for phys access */
1702 retval = aarch64_mmu_modify(target, 1);
1703 if (retval != ERROR_OK)
1704 return retval;
1705 }
1706 return aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1707 }
1708
1709 static int aarch64_write_phys_memory(struct target *target,
1710 target_addr_t address, uint32_t size,
1711 uint32_t count, const uint8_t *buffer)
1712 {
1713 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1714
1715 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
1716 size, count);
1717
1718 if (count && buffer) {
1719 /* write memory through APB-AP */
1720 retval = aarch64_mmu_modify(target, 0);
1721 if (retval != ERROR_OK)
1722 return retval;
1723 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1724 }
1725
1726 return retval;
1727 }
1728
1729 static int aarch64_write_memory(struct target *target, target_addr_t address,
1730 uint32_t size, uint32_t count, const uint8_t *buffer)
1731 {
1732 int mmu_enabled = 0;
1733 int retval;
1734
1735 /* aarch64 handles unaligned memory access */
1736 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
1737 "; count %" PRId32, address, size, count);
1738
1739 /* determine if MMU was enabled on target stop */
1740 retval = aarch64_mmu(target, &mmu_enabled);
1741 if (retval != ERROR_OK)
1742 return retval;
1743
1744 if (mmu_enabled) {
1745 retval = aarch64_check_address(target, address);
1746 if (retval != ERROR_OK)
1747 return retval;
1748 /* enable MMU as we could have disabled it for phys access */
1749 retval = aarch64_mmu_modify(target, 1);
1750 if (retval != ERROR_OK)
1751 return retval;
1752 }
1753 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1754 }
1755
1756 static int aarch64_handle_target_request(void *priv)
1757 {
1758 struct target *target = priv;
1759 struct armv8_common *armv8 = target_to_armv8(target);
1760 int retval;
1761
1762 if (!target_was_examined(target))
1763 return ERROR_OK;
1764 if (!target->dbg_msg_enabled)
1765 return ERROR_OK;
1766
1767 if (target->state == TARGET_RUNNING) {
1768 uint32_t request;
1769 uint32_t dscr;
1770 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1771 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1772
1773 /* check if we have data */
1774 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
1775 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1776 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
1777 if (retval == ERROR_OK) {
1778 target_request(target, request);
1779 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1780 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1781 }
1782 }
1783 }
1784
1785 return ERROR_OK;
1786 }
1787
1788 static int aarch64_examine_first(struct target *target)
1789 {
1790 struct aarch64_common *aarch64 = target_to_aarch64(target);
1791 struct armv8_common *armv8 = &aarch64->armv8_common;
1792 struct adiv5_dap *swjdp = armv8->arm.dap;
1793 int i;
1794 int retval = ERROR_OK;
1795 uint64_t debug, ttypr;
1796 uint32_t cpuid;
1797 uint32_t tmp0, tmp1;
1798 debug = ttypr = cpuid = 0;
1799
1800 /* We do one extra read to ensure DAP is configured,
1801 * we call ahbap_debugport_init(swjdp) instead
1802 */
1803 retval = dap_dp_init(swjdp);
1804 if (retval != ERROR_OK)
1805 return retval;
1806
1807 /* Search for the APB-AB - it is needed for access to debug registers */
1808 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
1809 if (retval != ERROR_OK) {
1810 LOG_ERROR("Could not find APB-AP for debug access");
1811 return retval;
1812 }
1813
1814 retval = mem_ap_init(armv8->debug_ap);
1815 if (retval != ERROR_OK) {
1816 LOG_ERROR("Could not initialize the APB-AP");
1817 return retval;
1818 }
1819
1820 armv8->debug_ap->memaccess_tck = 80;
1821
1822 if (!target->dbgbase_set) {
1823 uint32_t dbgbase;
1824 /* Get ROM Table base */
1825 uint32_t apid;
1826 int32_t coreidx = target->coreid;
1827 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
1828 if (retval != ERROR_OK)
1829 return retval;
1830 /* Lookup 0x15 -- Processor DAP */
1831 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
1832 &armv8->debug_base, &coreidx);
1833 if (retval != ERROR_OK)
1834 return retval;
1835 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
1836 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
1837 } else
1838 armv8->debug_base = target->dbgbase;
1839
1840 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1841 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
1842 if (retval != ERROR_OK) {
1843 LOG_DEBUG("LOCK debug access fail");
1844 return retval;
1845 }
1846
1847 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1848 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
1849 if (retval != ERROR_OK) {
1850 LOG_DEBUG("Examine %s failed", "oslock");
1851 return retval;
1852 }
1853
1854 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1855 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
1856 if (retval != ERROR_OK) {
1857 LOG_DEBUG("Examine %s failed", "CPUID");
1858 return retval;
1859 }
1860
1861 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1862 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
1863 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1864 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
1865 if (retval != ERROR_OK) {
1866 LOG_DEBUG("Examine %s failed", "Memory Model Type");
1867 return retval;
1868 }
1869 ttypr |= tmp1;
1870 ttypr = (ttypr << 32) | tmp0;
1871
1872 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1873 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
1874 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1875 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
1876 if (retval != ERROR_OK) {
1877 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
1878 return retval;
1879 }
1880 debug |= tmp1;
1881 debug = (debug << 32) | tmp0;
1882
1883 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1884 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
1885 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
1886
1887 if (target->ctibase == 0) {
1888 /* assume a v8 rom table layout */
1889 armv8->cti_base = target->ctibase = armv8->debug_base + 0x10000;
1890 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, target->ctibase);
1891 } else
1892 armv8->cti_base = target->ctibase;
1893
1894 armv8->arm.core_type = ARM_MODE_MON;
1895 retval = aarch64_dpm_setup(aarch64, debug);
1896 if (retval != ERROR_OK)
1897 return retval;
1898
1899 /* Setup Breakpoint Register Pairs */
1900 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
1901 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
1902 aarch64->brp_num_available = aarch64->brp_num;
1903 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
1904 for (i = 0; i < aarch64->brp_num; i++) {
1905 aarch64->brp_list[i].used = 0;
1906 if (i < (aarch64->brp_num-aarch64->brp_num_context))
1907 aarch64->brp_list[i].type = BRP_NORMAL;
1908 else
1909 aarch64->brp_list[i].type = BRP_CONTEXT;
1910 aarch64->brp_list[i].value = 0;
1911 aarch64->brp_list[i].control = 0;
1912 aarch64->brp_list[i].BRPn = i;
1913 }
1914
1915 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
1916
1917 target_set_examined(target);
1918 return ERROR_OK;
1919 }
1920
1921 static int aarch64_examine(struct target *target)
1922 {
1923 int retval = ERROR_OK;
1924
1925 /* don't re-probe hardware after each reset */
1926 if (!target_was_examined(target))
1927 retval = aarch64_examine_first(target);
1928
1929 /* Configure core debug access */
1930 if (retval == ERROR_OK)
1931 retval = aarch64_init_debug_access(target);
1932
1933 return retval;
1934 }
1935
1936 /*
1937 * Cortex-A8 target creation and initialization
1938 */
1939
/* target_type init_target hook; intentionally a no-op for aarch64.
 * All real setup happens during examine (cmd_ctx and target unused). */
static int aarch64_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
1946
1947 static int aarch64_init_arch_info(struct target *target,
1948 struct aarch64_common *aarch64, struct jtag_tap *tap)
1949 {
1950 struct armv8_common *armv8 = &aarch64->armv8_common;
1951 struct adiv5_dap *dap = armv8->arm.dap;
1952
1953 armv8->arm.dap = dap;
1954
1955 /* Setup struct aarch64_common */
1956 aarch64->common_magic = AARCH64_COMMON_MAGIC;
1957 /* tap has no dap initialized */
1958 if (!tap->dap) {
1959 tap->dap = dap_init();
1960
1961 /* Leave (only) generic DAP stuff for debugport_init() */
1962 tap->dap->tap = tap;
1963 }
1964
1965 armv8->arm.dap = tap->dap;
1966
1967 aarch64->fast_reg_read = 0;
1968
1969 /* register arch-specific functions */
1970 armv8->examine_debug_reason = NULL;
1971
1972 armv8->post_debug_entry = aarch64_post_debug_entry;
1973
1974 armv8->pre_restore_context = NULL;
1975
1976 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
1977
1978 /* REVISIT v7a setup should be in a v7a-specific routine */
1979 armv8_init_arch_info(target, armv8);
1980 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
1981
1982 return ERROR_OK;
1983 }
1984
1985 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
1986 {
1987 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
1988
1989 return aarch64_init_arch_info(target, aarch64, target->tap);
1990 }
1991
1992 static int aarch64_mmu(struct target *target, int *enabled)
1993 {
1994 if (target->state != TARGET_HALTED) {
1995 LOG_ERROR("%s: target not halted", __func__);
1996 return ERROR_TARGET_INVALID;
1997 }
1998
1999 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2000 return ERROR_OK;
2001 }
2002
/* Translate a virtual address to physical via the ARMv8 MMU helper.
 * NOTE(review): the trailing literal 1 selects a translation mode in
 * armv8_mmu_translate_va_pa() — its exact meaning is not visible here;
 * confirm against that helper before changing. */
static int aarch64_virt2phys(struct target *target, target_addr_t virt,
	target_addr_t *phys)
{
	return armv8_mmu_translate_va_pa(target, virt, phys, 1);
}
2008
2009 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2010 {
2011 struct target *target = get_current_target(CMD_CTX);
2012 struct armv8_common *armv8 = target_to_armv8(target);
2013
2014 return armv8_handle_cache_info_command(CMD_CTX,
2015 &armv8->armv8_mmu.armv8_cache);
2016 }
2017
2018
2019 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2020 {
2021 struct target *target = get_current_target(CMD_CTX);
2022 if (!target_was_examined(target)) {
2023 LOG_ERROR("target not examined yet");
2024 return ERROR_FAIL;
2025 }
2026
2027 return aarch64_init_debug_access(target);
2028 }
2029 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2030 {
2031 struct target *target = get_current_target(CMD_CTX);
2032 /* check target is an smp target */
2033 struct target_list *head;
2034 struct target *curr;
2035 head = target->head;
2036 target->smp = 0;
2037 if (head != (struct target_list *)NULL) {
2038 while (head != (struct target_list *)NULL) {
2039 curr = head->target;
2040 curr->smp = 0;
2041 head = head->next;
2042 }
2043 /* fixes the target display to the debugger */
2044 target->gdb_service->target = target;
2045 }
2046 return ERROR_OK;
2047 }
2048
2049 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2050 {
2051 struct target *target = get_current_target(CMD_CTX);
2052 struct target_list *head;
2053 struct target *curr;
2054 head = target->head;
2055 if (head != (struct target_list *)NULL) {
2056 target->smp = 1;
2057 while (head != (struct target_list *)NULL) {
2058 curr = head->target;
2059 curr->smp = 1;
2060 head = head->next;
2061 }
2062 }
2063 return ERROR_OK;
2064 }
2065
2066 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2067 {
2068 struct target *target = get_current_target(CMD_CTX);
2069 int retval = ERROR_OK;
2070 struct target_list *head;
2071 head = target->head;
2072 if (head != (struct target_list *)NULL) {
2073 if (CMD_ARGC == 1) {
2074 int coreid = 0;
2075 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2076 if (ERROR_OK != retval)
2077 return retval;
2078 target->gdb_service->core[1] = coreid;
2079
2080 }
2081 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2082 , target->gdb_service->core[1]);
2083 }
2084 return ERROR_OK;
2085 }
2086
2087 static const struct command_registration aarch64_exec_command_handlers[] = {
2088 {
2089 .name = "cache_info",
2090 .handler = aarch64_handle_cache_info_command,
2091 .mode = COMMAND_EXEC,
2092 .help = "display information about target caches",
2093 .usage = "",
2094 },
2095 {
2096 .name = "dbginit",
2097 .handler = aarch64_handle_dbginit_command,
2098 .mode = COMMAND_EXEC,
2099 .help = "Initialize core debug",
2100 .usage = "",
2101 },
2102 { .name = "smp_off",
2103 .handler = aarch64_handle_smp_off_command,
2104 .mode = COMMAND_EXEC,
2105 .help = "Stop smp handling",
2106 .usage = "",
2107 },
2108 {
2109 .name = "smp_on",
2110 .handler = aarch64_handle_smp_on_command,
2111 .mode = COMMAND_EXEC,
2112 .help = "Restart smp handling",
2113 .usage = "",
2114 },
2115 {
2116 .name = "smp_gdb",
2117 .handler = aarch64_handle_smp_gdb_command,
2118 .mode = COMMAND_EXEC,
2119 .help = "display/fix current core played to gdb",
2120 .usage = "",
2121 },
2122
2123
2124 COMMAND_REGISTRATION_DONE
2125 };
/* Top-level command registration: chains the generic ARM and ARMv8
 * command groups plus the aarch64-specific handlers above. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv8_command_handlers,
	},
	{
		/* NOTE(review): the group is named "cortex_a" although this
		 * is the aarch64 target — likely carried over from
		 * cortex_a.c.  Renaming to "aarch64" would be clearer but
		 * changes the user-visible command prefix; confirm with
		 * existing scripts before changing. */
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2142
/* Target vtable hooking the aarch64 implementation into the OpenOCD
 * target framework ("aarch64" target type). */
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	/* run control */
	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	/* virtual-address memory access */
	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* breakpoints; watchpoints not implemented for this target */
	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	/* physical-address access and address translation */
	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)