aarch64: discard async aborts on entering debug state
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
33 static int aarch64_poll(struct target *target);
34 static int aarch64_debug_entry(struct target *target);
35 static int aarch64_restore_context(struct target *target, bool bpwp);
36 static int aarch64_set_breakpoint(struct target *target,
37 struct breakpoint *breakpoint, uint8_t matchmode);
38 static int aarch64_set_context_breakpoint(struct target *target,
39 struct breakpoint *breakpoint, uint8_t matchmode);
40 static int aarch64_set_hybrid_breakpoint(struct target *target,
41 struct breakpoint *breakpoint);
42 static int aarch64_unset_breakpoint(struct target *target,
43 struct breakpoint *breakpoint);
44 static int aarch64_mmu(struct target *target, int *enabled);
45 static int aarch64_virt2phys(struct target *target,
46 target_addr_t virt, target_addr_t *phys);
47 static int aarch64_read_apb_ap_memory(struct target *target,
48 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
49
50 static int aarch64_restore_system_control_reg(struct target *target)
51 {
52 int retval = ERROR_OK;
53
54 struct aarch64_common *aarch64 = target_to_aarch64(target);
55 struct armv8_common *armv8 = target_to_armv8(target);
56
57 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
58 aarch64->system_control_reg_curr = aarch64->system_control_reg;
59 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
60
61 switch (armv8->arm.core_mode) {
62 case ARMV8_64_EL0T:
63 case ARMV8_64_EL1T:
64 case ARMV8_64_EL1H:
65 retval = armv8->arm.msr(target, 3, /*op 0*/
66 0, 1, /* op1, op2 */
67 0, 0, /* CRn, CRm */
68 aarch64->system_control_reg);
69 if (retval != ERROR_OK)
70 return retval;
71 break;
72 case ARMV8_64_EL2T:
73 case ARMV8_64_EL2H:
74 retval = armv8->arm.msr(target, 3, /*op 0*/
75 4, 1, /* op1, op2 */
76 0, 0, /* CRn, CRm */
77 aarch64->system_control_reg);
78 if (retval != ERROR_OK)
79 return retval;
80 break;
81 case ARMV8_64_EL3H:
82 case ARMV8_64_EL3T:
83 retval = armv8->arm.msr(target, 3, /*op 0*/
84 6, 1, /* op1, op2 */
85 0, 0, /* CRn, CRm */
86 aarch64->system_control_reg);
87 if (retval != ERROR_OK)
88 return retval;
89 break;
90 default:
91 retval = armv8->arm.mcr(target, 15, 0, 0, 1, 0, aarch64->system_control_reg);
92 if (retval != ERROR_OK)
93 return retval;
94 break;
95 }
96 }
97 return retval;
98 }
99
/* check address before aarch64_apb read write access with mmu on
 * remove apb predictible data abort */
static int aarch64_check_address(struct target *target, uint32_t address)
{
	/* TODO: not implemented yet — every address is accepted unconditionally.
	 * Intended purpose (per the comment above): reject addresses that would
	 * cause a predictable data abort on APB-AP access while the MMU is on. */
	return ERROR_OK;
}
107 /* modify system_control_reg in order to enable or disable mmu for :
108 * - virt2phys address conversion
109 * - read or write memory in phys or virt address */
110 static int aarch64_mmu_modify(struct target *target, int enable)
111 {
112 struct aarch64_common *aarch64 = target_to_aarch64(target);
113 struct armv8_common *armv8 = &aarch64->armv8_common;
114 int retval = ERROR_OK;
115
116 if (enable) {
117 /* if mmu enabled at target stop and mmu not enable */
118 if (!(aarch64->system_control_reg & 0x1U)) {
119 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
120 return ERROR_FAIL;
121 }
122 if (!(aarch64->system_control_reg_curr & 0x1U)) {
123 aarch64->system_control_reg_curr |= 0x1U;
124 switch (armv8->arm.core_mode) {
125 case ARMV8_64_EL0T:
126 case ARMV8_64_EL1T:
127 case ARMV8_64_EL1H:
128 retval = armv8->arm.msr(target, 3, /*op 0*/
129 0, 0, /* op1, op2 */
130 1, 0, /* CRn, CRm */
131 aarch64->system_control_reg_curr);
132 if (retval != ERROR_OK)
133 return retval;
134 break;
135 case ARMV8_64_EL2T:
136 case ARMV8_64_EL2H:
137 retval = armv8->arm.msr(target, 3, /*op 0*/
138 4, 0, /* op1, op2 */
139 1, 0, /* CRn, CRm */
140 aarch64->system_control_reg_curr);
141 if (retval != ERROR_OK)
142 return retval;
143 break;
144 case ARMV8_64_EL3H:
145 case ARMV8_64_EL3T:
146 retval = armv8->arm.msr(target, 3, /*op 0*/
147 6, 0, /* op1, op2 */
148 1, 0, /* CRn, CRm */
149 aarch64->system_control_reg_curr);
150 if (retval != ERROR_OK)
151 return retval;
152 break;
153 default:
154 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
155 }
156 }
157 } else {
158 if (aarch64->system_control_reg_curr & 0x4U) {
159 /* data cache is active */
160 aarch64->system_control_reg_curr &= ~0x4U;
161 /* flush data cache armv7 function to be called */
162 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
163 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
164 }
165 if ((aarch64->system_control_reg_curr & 0x1U)) {
166 aarch64->system_control_reg_curr &= ~0x1U;
167 switch (armv8->arm.core_mode) {
168 case ARMV8_64_EL0T:
169 case ARMV8_64_EL1T:
170 case ARMV8_64_EL1H:
171 retval = armv8->arm.msr(target, 3, /*op 0*/
172 0, 0, /* op1, op2 */
173 1, 0, /* CRn, CRm */
174 aarch64->system_control_reg_curr);
175 if (retval != ERROR_OK)
176 return retval;
177 break;
178 case ARMV8_64_EL2T:
179 case ARMV8_64_EL2H:
180 retval = armv8->arm.msr(target, 3, /*op 0*/
181 4, 0, /* op1, op2 */
182 1, 0, /* CRn, CRm */
183 aarch64->system_control_reg_curr);
184 if (retval != ERROR_OK)
185 return retval;
186 break;
187 case ARMV8_64_EL3H:
188 case ARMV8_64_EL3T:
189 retval = armv8->arm.msr(target, 3, /*op 0*/
190 6, 0, /* op1, op2 */
191 1, 0, /* CRn, CRm */
192 aarch64->system_control_reg_curr);
193 if (retval != ERROR_OK)
194 return retval;
195 break;
196 default:
197 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
198 break;
199 }
200 }
201 }
202 return retval;
203 }
204
/*
 * Basic debug access, very low level assumes state is saved
 *
 * Clears the sticky power-down status, then programs the static CTI routing
 * used by this driver (channel 0 = halt, channel 1 = restart) and finally
 * polls the target to refresh its state.
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_CTR, 1);
	/* By default, gate all channel triggers to and from the CTM */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, 0);
	/* output halt requests to PE on channel 0 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return aarch64_poll(target);
}
253
254 /* Write to memory mapped registers directly with no cache or mmu handling */
255 static int aarch64_dap_write_memap_register_u32(struct target *target,
256 uint32_t address,
257 uint32_t value)
258 {
259 int retval;
260 struct armv8_common *armv8 = target_to_armv8(target);
261
262 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
263
264 return retval;
265 }
266
267 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
268 {
269 struct arm_dpm *dpm = &a8->armv8_common.dpm;
270 int retval;
271
272 dpm->arm = &a8->armv8_common.arm;
273 dpm->didr = debug;
274
275 retval = armv8_dpm_setup(dpm);
276 if (retval == ERROR_OK)
277 retval = armv8_dpm_initialize(dpm);
278
279 return retval;
280 }
281
282 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
283 {
284 struct armv8_common *armv8 = target_to_armv8(target);
285 uint32_t dscr;
286
287 /* Read DSCR */
288 int retval = mem_ap_read_atomic_u32(armv8->debug_ap,
289 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
290 if (ERROR_OK != retval)
291 return retval;
292
293 /* clear bitfield */
294 dscr &= ~bit_mask;
295 /* put new value */
296 dscr |= value & bit_mask;
297
298 /* write new DSCR */
299 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
300 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
301 return retval;
302 }
303
304 static struct target *get_aarch64(struct target *target, int32_t coreid)
305 {
306 struct target_list *head;
307 struct target *curr;
308
309 head = target->head;
310 while (head != (struct target_list *)NULL) {
311 curr = head->target;
312 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
313 return curr;
314 head = head->next;
315 }
316 return target;
317 }
318 static int aarch64_halt(struct target *target);
319
320 static int aarch64_halt_smp(struct target *target)
321 {
322 int retval = ERROR_OK;
323 struct target_list *head = target->head;
324
325 while (head != (struct target_list *)NULL) {
326 struct target *curr = head->target;
327 struct armv8_common *armv8 = target_to_armv8(curr);
328
329 /* open the gate for channel 0 to let HALT requests pass to the CTM */
330 if (curr->smp) {
331 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
332 armv8->cti_base + CTI_GATE, CTI_CHNL(0));
333 if (retval == ERROR_OK)
334 retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
335 }
336 if (retval != ERROR_OK)
337 break;
338
339 head = head->next;
340 }
341
342 /* halt the target PE */
343 if (retval == ERROR_OK)
344 retval = aarch64_halt(target);
345
346 return retval;
347 }
348
349 static int update_halt_gdb(struct target *target)
350 {
351 int retval = 0;
352 if (target->gdb_service && target->gdb_service->core[0] == -1) {
353 target->gdb_service->target = target;
354 target->gdb_service->core[0] = target->coreid;
355 retval += aarch64_halt_smp(target);
356 }
357 return retval;
358 }
359
360 /*
361 * Cortex-A8 Run control
362 */
363
/*
 * Poll the core's debug status and update target->state accordingly.
 * On a running -> halted transition this performs the full debug entry
 * (register save etc.) and notifies event callbacks; in SMP configurations
 * it also handles the gdb core-toggle protocol described below.
 */
static int aarch64_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		/* gdb requested a core switch: point the service at the chosen
		 * (halted) core and report a halt without touching hardware */
		target->gdb_service->target =
			get_aarch64(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	/* read DSCR and cache it for later examination by debug_entry */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	aarch64->cpudbg_dscr = dscr;

	/* run mode 0x3: the PE is halted in debug state */
	if (DSCR_RUN_MODE(dscr) == 0x3) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					/* halt the other cores and sync gdb bookkeeping */
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				/* debug-run halts report a distinct event */
				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
429
430 static int aarch64_halt(struct target *target)
431 {
432 int retval = ERROR_OK;
433 uint32_t dscr;
434 struct armv8_common *armv8 = target_to_armv8(target);
435
436 /*
437 * add HDE in halting debug mode
438 */
439 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
440 if (retval != ERROR_OK)
441 return retval;
442
443 /* trigger an event on channel 0, this outputs a halt request to the PE */
444 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
445 armv8->cti_base + CTI_APPPULSE, CTI_CHNL(0));
446 if (retval != ERROR_OK)
447 return retval;
448
449 long long then = timeval_ms();
450 for (;; ) {
451 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
452 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
453 if (retval != ERROR_OK)
454 return retval;
455 if ((dscr & DSCRV8_HALT_MASK) != 0)
456 break;
457 if (timeval_ms() > then + 1000) {
458 LOG_ERROR("Timeout waiting for halt");
459 return ERROR_FAIL;
460 }
461 }
462
463 target->debug_reason = DBG_REASON_DBGRQ;
464
465 return ERROR_OK;
466 }
467
/*
 * Prepare the core for resuming: compute the resume PC (either the current
 * PC or *address when current == 0), apply the per-state PC alignment
 * fixups, restore the system control register and write back all dirty
 * registers.  Does NOT actually restart the core; that is done by
 * aarch64_internal_restart().  On return *address holds the resume PC.
 */
static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		/* A32 instructions are 4-byte aligned */
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		/* A64 instructions are 4-byte aligned (64-bit address space) */
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	/* mark PC dirty so the write-back below pushes it to the core */
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
520
/*
 * Restart a prepared core.  Acknowledges the pending halt event, opens the
 * CTI gate for channel 1 (restart) and — unless slave_pe is set, in which
 * case another PE's restart pulse is expected to reach this core through
 * the CTM — pulses channel 1 and waits for the core to leave debug state.
 * Invalidates the register caches since the core is running afterwards.
 */
static int aarch64_internal_restart(struct target *target, bool slave_pe)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* make sure to acknowledge the halt event before resuming */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_INACK, CTI_TRIG(HALT));

	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs
	 */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	if (!slave_pe) {
		/* trigger an event on channel 1, generates a restart request to the PE */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_APPPULSE, CTI_CHNL(1));
		if (retval != ERROR_OK)
			return retval;

		/* wait up to 1 s for the core to leave debug state
		 * NOTE(review): the loop tests DSCR_HDE — looks like it should
		 * test a restarted/run-mode flag; confirm against the DSCR bit
		 * definitions before changing */
		long long then = timeval_ms();
		for (;; ) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			if (retval != ERROR_OK)
				return retval;
			if ((dscr & DSCR_HDE) != 0)
				break;
			if (timeval_ms() > then + 1000) {
				LOG_ERROR("Timeout waiting for resume");
				return ERROR_FAIL;
			}
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);
	register_cache_invalidate(arm->core_cache->next);

	return ERROR_OK;
}
588
589 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
590 {
591 int retval = 0;
592 struct target_list *head;
593 struct target *curr;
594 uint64_t address;
595 head = target->head;
596 while (head != (struct target_list *)NULL) {
597 curr = head->target;
598 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
599 /* resume current address , not in step mode */
600 retval += aarch64_internal_restore(curr, 1, &address,
601 handle_breakpoints, 0);
602 retval += aarch64_internal_restart(curr, true);
603 }
604 head = head->next;
605
606 }
607 return retval;
608 }
609
610 static int aarch64_resume(struct target *target, int current,
611 target_addr_t address, int handle_breakpoints, int debug_execution)
612 {
613 int retval = 0;
614 uint64_t addr = address;
615
616 /* dummy resume for smp toggle in order to reduce gdb impact */
617 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
618 /* simulate a start and halt of target */
619 target->gdb_service->target = NULL;
620 target->gdb_service->core[0] = target->gdb_service->core[1];
621 /* fake resume at next poll we play the target core[1], see poll*/
622 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
623 return 0;
624 }
625 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
626 debug_execution);
627 if (target->smp) {
628 target->gdb_service->core[0] = -1;
629 retval = aarch64_restore_smp(target, handle_breakpoints);
630 if (retval != ERROR_OK)
631 return retval;
632 }
633 aarch64_internal_restart(target, false);
634
635 if (!debug_execution) {
636 target->state = TARGET_RUNNING;
637 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
638 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
639 } else {
640 target->state = TARGET_DEBUG_RUNNING;
641 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
642 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
643 }
644
645 return ERROR_OK;
646 }
647
/*
 * Enter debug state: select the opcode/register-access tables matching the
 * core state reported by DSCR, clear sticky errors, discard pending async
 * aborts, decode the debug reason and save the current register set.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), aarch64->cpudbg_dscr);

	/* cpudbg_dscr was cached by aarch64_poll() */
	dpm->dscr = aarch64->cpudbg_dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);

	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);

	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, aarch64->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		/* WFAR is 64 bit split over two 32-bit debug registers;
		 * high word first, then the low word is OR-ed in */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR1,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	/* per-core hook (aarch64_post_debug_entry) reads SCTLR, cache info */
	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
705
/*
 * Per-core work after debug entry: read the system control register for the
 * current exception level into aarch64->system_control_reg, identify the
 * caches on first halt, and derive the MMU/cache-enable flags from SCTLR.
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;

	/* clear sticky errors */
	mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);

	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* EL0 cannot read SCTLR_EL1 directly; switch up to EL1 first */
		armv8_dpm_modeswitch(&armv8->dpm, ARMV8_64_EL1H);
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		/* SCTLR_EL1 */
		retval = armv8->arm.mrs(target, 3, /*op 0*/
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				&aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		/* SCTLR_EL2 */
		retval = armv8->arm.mrs(target, 3, /*op 0*/
				4, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				&aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		/* SCTLR_EL3 */
		retval = armv8->arm.mrs(target, 3, /*op 0*/
				6, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				&aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;

	case ARM_MODE_SVC:
		/* AArch32 SVC mode: SCTLR via cp15, c1, c0, 0.
		 * NOTE(review): only SVC of the AArch32 modes is handled here;
		 * other AArch32 modes fall into the default branch — confirm
		 * whether that is intentional */
		retval = armv8->arm.mrc(target, 15, 0, 0, 1, 0, &aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;

	default:
		LOG_INFO("cannot read system control register in this mode");
		break;
	}

	/* undo any temporary modeswitch done above */
	armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* first halt on this core: identify caches and read MPIDR */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	/* SCTLR.M (bit 0), SCTLR.C (bit 2), SCTLR.I (bit 12) */
	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
		(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
		(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	aarch64->curr_mode = armv8->arm.core_mode;
	return ERROR_OK;
}
778
779 static int aarch64_step(struct target *target, int current, target_addr_t address,
780 int handle_breakpoints)
781 {
782 struct armv8_common *armv8 = target_to_armv8(target);
783 int retval;
784 uint32_t edecr;
785
786 if (target->state != TARGET_HALTED) {
787 LOG_WARNING("target not halted");
788 return ERROR_TARGET_NOT_HALTED;
789 }
790
791 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
792 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
793 if (retval != ERROR_OK)
794 return retval;
795
796 /* make sure EDECR.SS is not set when restoring the register */
797 edecr &= ~0x4;
798
799 /* set EDECR.SS to enter hardware step mode */
800 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
801 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
802 if (retval != ERROR_OK)
803 return retval;
804
805 /* disable interrupts while stepping */
806 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
807 if (retval != ERROR_OK)
808 return ERROR_OK;
809
810 /* resume the target */
811 retval = aarch64_resume(target, current, address, 0, 0);
812 if (retval != ERROR_OK)
813 return retval;
814
815 long long then = timeval_ms();
816 while (target->state != TARGET_HALTED) {
817 retval = aarch64_poll(target);
818 if (retval != ERROR_OK)
819 return retval;
820 if (timeval_ms() > then + 1000) {
821 LOG_ERROR("timeout waiting for target halt");
822 return ERROR_FAIL;
823 }
824 }
825
826 /* restore EDECR */
827 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
828 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
829 if (retval != ERROR_OK)
830 return retval;
831
832 /* restore interrupts */
833 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
834 if (retval != ERROR_OK)
835 return ERROR_OK;
836
837 return ERROR_OK;
838 }
839
840 static int aarch64_restore_context(struct target *target, bool bpwp)
841 {
842 struct armv8_common *armv8 = target_to_armv8(target);
843
844 LOG_DEBUG(" ");
845
846 if (armv8->pre_restore_context)
847 armv8->pre_restore_context(target);
848
849 return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
850
851 }
852
853 /*
854 * Cortex-A8 Breakpoint and watchpoint functions
855 */
856
857 /* Setup hardware Breakpoint Register Pair */
858 static int aarch64_set_breakpoint(struct target *target,
859 struct breakpoint *breakpoint, uint8_t matchmode)
860 {
861 int retval;
862 int brp_i = 0;
863 uint32_t control;
864 uint8_t byte_addr_select = 0x0F;
865 struct aarch64_common *aarch64 = target_to_aarch64(target);
866 struct armv8_common *armv8 = &aarch64->armv8_common;
867 struct aarch64_brp *brp_list = aarch64->brp_list;
868
869 if (breakpoint->set) {
870 LOG_WARNING("breakpoint already set");
871 return ERROR_OK;
872 }
873
874 if (breakpoint->type == BKPT_HARD) {
875 int64_t bpt_value;
876 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
877 brp_i++;
878 if (brp_i >= aarch64->brp_num) {
879 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
880 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
881 }
882 breakpoint->set = brp_i + 1;
883 if (breakpoint->length == 2)
884 byte_addr_select = (3 << (breakpoint->address & 0x02));
885 control = ((matchmode & 0x7) << 20)
886 | (1 << 13)
887 | (byte_addr_select << 5)
888 | (3 << 1) | 1;
889 brp_list[brp_i].used = 1;
890 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
891 brp_list[brp_i].control = control;
892 bpt_value = brp_list[brp_i].value;
893
894 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
895 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
896 (uint32_t)(bpt_value & 0xFFFFFFFF));
897 if (retval != ERROR_OK)
898 return retval;
899 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
900 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
901 (uint32_t)(bpt_value >> 32));
902 if (retval != ERROR_OK)
903 return retval;
904
905 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
906 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
907 brp_list[brp_i].control);
908 if (retval != ERROR_OK)
909 return retval;
910 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
911 brp_list[brp_i].control,
912 brp_list[brp_i].value);
913
914 } else if (breakpoint->type == BKPT_SOFT) {
915 uint8_t code[4];
916
917 buf_set_u32(code, 0, 32, ARMV8_HLT(0x11));
918 retval = target_read_memory(target,
919 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
920 breakpoint->length, 1,
921 breakpoint->orig_instr);
922 if (retval != ERROR_OK)
923 return retval;
924
925 armv8_cache_d_inner_flush_virt(armv8,
926 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
927 breakpoint->length);
928
929 retval = target_write_memory(target,
930 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
931 breakpoint->length, 1, code);
932 if (retval != ERROR_OK)
933 return retval;
934
935 armv8_cache_d_inner_flush_virt(armv8,
936 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
937 breakpoint->length);
938
939 armv8_cache_i_inner_inval_virt(armv8,
940 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
941 breakpoint->length);
942
943 breakpoint->set = 0x11; /* Any nice value but 0 */
944 }
945
946 /* Ensure that halting debug mode is enable */
947 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
948 if (retval != ERROR_OK) {
949 LOG_DEBUG("Failed to set DSCR.HDE");
950 return retval;
951 }
952
953 return ERROR_OK;
954 }
955
956 static int aarch64_set_context_breakpoint(struct target *target,
957 struct breakpoint *breakpoint, uint8_t matchmode)
958 {
959 int retval = ERROR_FAIL;
960 int brp_i = 0;
961 uint32_t control;
962 uint8_t byte_addr_select = 0x0F;
963 struct aarch64_common *aarch64 = target_to_aarch64(target);
964 struct armv8_common *armv8 = &aarch64->armv8_common;
965 struct aarch64_brp *brp_list = aarch64->brp_list;
966
967 if (breakpoint->set) {
968 LOG_WARNING("breakpoint already set");
969 return retval;
970 }
971 /*check available context BRPs*/
972 while ((brp_list[brp_i].used ||
973 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
974 brp_i++;
975
976 if (brp_i >= aarch64->brp_num) {
977 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
978 return ERROR_FAIL;
979 }
980
981 breakpoint->set = brp_i + 1;
982 control = ((matchmode & 0x7) << 20)
983 | (1 << 13)
984 | (byte_addr_select << 5)
985 | (3 << 1) | 1;
986 brp_list[brp_i].used = 1;
987 brp_list[brp_i].value = (breakpoint->asid);
988 brp_list[brp_i].control = control;
989 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
990 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
991 brp_list[brp_i].value);
992 if (retval != ERROR_OK)
993 return retval;
994 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
995 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
996 brp_list[brp_i].control);
997 if (retval != ERROR_OK)
998 return retval;
999 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1000 brp_list[brp_i].control,
1001 brp_list[brp_i].value);
1002 return ERROR_OK;
1003
1004 }
1005
1006 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1007 {
1008 int retval = ERROR_FAIL;
1009 int brp_1 = 0; /* holds the contextID pair */
1010 int brp_2 = 0; /* holds the IVA pair */
1011 uint32_t control_CTX, control_IVA;
1012 uint8_t CTX_byte_addr_select = 0x0F;
1013 uint8_t IVA_byte_addr_select = 0x0F;
1014 uint8_t CTX_machmode = 0x03;
1015 uint8_t IVA_machmode = 0x01;
1016 struct aarch64_common *aarch64 = target_to_aarch64(target);
1017 struct armv8_common *armv8 = &aarch64->armv8_common;
1018 struct aarch64_brp *brp_list = aarch64->brp_list;
1019
1020 if (breakpoint->set) {
1021 LOG_WARNING("breakpoint already set");
1022 return retval;
1023 }
1024 /*check available context BRPs*/
1025 while ((brp_list[brp_1].used ||
1026 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1027 brp_1++;
1028
1029 printf("brp(CTX) found num: %d\n", brp_1);
1030 if (brp_1 >= aarch64->brp_num) {
1031 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1032 return ERROR_FAIL;
1033 }
1034
1035 while ((brp_list[brp_2].used ||
1036 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1037 brp_2++;
1038
1039 printf("brp(IVA) found num: %d\n", brp_2);
1040 if (brp_2 >= aarch64->brp_num) {
1041 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1042 return ERROR_FAIL;
1043 }
1044
1045 breakpoint->set = brp_1 + 1;
1046 breakpoint->linked_BRP = brp_2;
1047 control_CTX = ((CTX_machmode & 0x7) << 20)
1048 | (brp_2 << 16)
1049 | (0 << 14)
1050 | (CTX_byte_addr_select << 5)
1051 | (3 << 1) | 1;
1052 brp_list[brp_1].used = 1;
1053 brp_list[brp_1].value = (breakpoint->asid);
1054 brp_list[brp_1].control = control_CTX;
1055 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1056 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1057 brp_list[brp_1].value);
1058 if (retval != ERROR_OK)
1059 return retval;
1060 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1061 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1062 brp_list[brp_1].control);
1063 if (retval != ERROR_OK)
1064 return retval;
1065
1066 control_IVA = ((IVA_machmode & 0x7) << 20)
1067 | (brp_1 << 16)
1068 | (1 << 13)
1069 | (IVA_byte_addr_select << 5)
1070 | (3 << 1) | 1;
1071 brp_list[brp_2].used = 1;
1072 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1073 brp_list[brp_2].control = control_IVA;
1074 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1075 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1076 brp_list[brp_2].value & 0xFFFFFFFF);
1077 if (retval != ERROR_OK)
1078 return retval;
1079 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1080 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1081 brp_list[brp_2].value >> 32);
1082 if (retval != ERROR_OK)
1083 return retval;
1084 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1085 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1086 brp_list[brp_2].control);
1087 if (retval != ERROR_OK)
1088 return retval;
1089
1090 return ERROR_OK;
1091 }
1092
/*
 * Remove a previously-set breakpoint from the target.
 *
 * BKPT_HARD: releases the breakpoint register pair(s) and zeroes their
 * value/control registers on the core. A hybrid breakpoint is
 * recognised by having both address and asid non-zero; it releases two
 * linked pairs — the context pair recorded in breakpoint->set (+1) and
 * the IVA pair recorded in breakpoint->linked_BRP.
 * Otherwise (soft breakpoint): the original instruction bytes are
 * written back over the breakpoint opcode and the data/instruction
 * caches are flushed/invalidated around it.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			/* hybrid breakpoint: free both the context pair (brp_i)
			 * and the linked IVA pair (brp_j) */
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* disable via BCR first, then clear both BVR halves */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain IVA hardware breakpoint: free the single pair */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		/* flush D-cache so the write below targets coherent memory */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		/* invalidate I-cache so the core refetches the restored opcode */
		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1225
1226 static int aarch64_add_breakpoint(struct target *target,
1227 struct breakpoint *breakpoint)
1228 {
1229 struct aarch64_common *aarch64 = target_to_aarch64(target);
1230
1231 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1232 LOG_INFO("no hardware breakpoint available");
1233 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1234 }
1235
1236 if (breakpoint->type == BKPT_HARD)
1237 aarch64->brp_num_available--;
1238
1239 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1240 }
1241
1242 static int aarch64_add_context_breakpoint(struct target *target,
1243 struct breakpoint *breakpoint)
1244 {
1245 struct aarch64_common *aarch64 = target_to_aarch64(target);
1246
1247 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1248 LOG_INFO("no hardware breakpoint available");
1249 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1250 }
1251
1252 if (breakpoint->type == BKPT_HARD)
1253 aarch64->brp_num_available--;
1254
1255 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1256 }
1257
1258 static int aarch64_add_hybrid_breakpoint(struct target *target,
1259 struct breakpoint *breakpoint)
1260 {
1261 struct aarch64_common *aarch64 = target_to_aarch64(target);
1262
1263 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1264 LOG_INFO("no hardware breakpoint available");
1265 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1266 }
1267
1268 if (breakpoint->type == BKPT_HARD)
1269 aarch64->brp_num_available--;
1270
1271 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1272 }
1273
1274
1275 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1276 {
1277 struct aarch64_common *aarch64 = target_to_aarch64(target);
1278
1279 #if 0
1280 /* It is perfectly possible to remove breakpoints while the target is running */
1281 if (target->state != TARGET_HALTED) {
1282 LOG_WARNING("target not halted");
1283 return ERROR_TARGET_NOT_HALTED;
1284 }
1285 #endif
1286
1287 if (breakpoint->set) {
1288 aarch64_unset_breakpoint(target, breakpoint);
1289 if (breakpoint->type == BKPT_HARD)
1290 aarch64->brp_num_available++;
1291 }
1292
1293 return ERROR_OK;
1294 }
1295
/*
 * AArch64 reset functions
 */
1299
1300 static int aarch64_assert_reset(struct target *target)
1301 {
1302 struct armv8_common *armv8 = target_to_armv8(target);
1303
1304 LOG_DEBUG(" ");
1305
1306 /* FIXME when halt is requested, make it work somehow... */
1307
1308 /* Issue some kind of warm reset. */
1309 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1310 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1311 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1312 /* REVISIT handle "pulls" cases, if there's
1313 * hardware that needs them to work.
1314 */
1315 jtag_add_reset(0, 1);
1316 } else {
1317 LOG_ERROR("%s: how to reset?", target_name(target));
1318 return ERROR_FAIL;
1319 }
1320
1321 /* registers are now invalid */
1322 register_cache_invalidate(armv8->arm.core_cache);
1323
1324 target->state = TARGET_RESET;
1325
1326 return ERROR_OK;
1327 }
1328
1329 static int aarch64_deassert_reset(struct target *target)
1330 {
1331 int retval;
1332
1333 LOG_DEBUG(" ");
1334
1335 /* be certain SRST is off */
1336 jtag_add_reset(0, 0);
1337
1338 retval = aarch64_poll(target);
1339 if (retval != ERROR_OK)
1340 return retval;
1341
1342 if (target->reset_halt) {
1343 if (target->state != TARGET_HALTED) {
1344 LOG_WARNING("%s: ran after reset and before halt ...",
1345 target_name(target));
1346 retval = target_halt(target);
1347 if (retval != ERROR_OK)
1348 return retval;
1349 }
1350 }
1351
1352 return ERROR_OK;
1353 }
1354
1355 static int aarch64_write_apb_ap_memory(struct target *target,
1356 uint64_t address, uint32_t size,
1357 uint32_t count, const uint8_t *buffer)
1358 {
1359 /* write memory through APB-AP */
1360 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1361 struct armv8_common *armv8 = target_to_armv8(target);
1362 struct arm_dpm *dpm = &armv8->dpm;
1363 struct arm *arm = &armv8->arm;
1364 int total_bytes = count * size;
1365 int total_u32;
1366 int start_byte = address & 0x3;
1367 int end_byte = (address + total_bytes) & 0x3;
1368 struct reg *reg;
1369 uint32_t dscr;
1370 uint8_t *tmp_buff = NULL;
1371
1372 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count%" PRIu32,
1373 address, size, count);
1374 if (target->state != TARGET_HALTED) {
1375 LOG_WARNING("target not halted");
1376 return ERROR_TARGET_NOT_HALTED;
1377 }
1378
1379 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1380
1381 /* Mark register R0 as dirty, as it will be used
1382 * for transferring the data.
1383 * It will be restored automatically when exiting
1384 * debug mode
1385 */
1386 reg = armv8_reg_current(arm, 1);
1387 reg->dirty = true;
1388
1389 reg = armv8_reg_current(arm, 0);
1390 reg->dirty = true;
1391
1392 /* clear any abort */
1393 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1394 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1395 if (retval != ERROR_OK)
1396 return retval;
1397
1398
1399 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1400
1401 /* The algorithm only copies 32 bit words, so the buffer
1402 * should be expanded to include the words at either end.
1403 * The first and last words will be read first to avoid
1404 * corruption if needed.
1405 */
1406 tmp_buff = malloc(total_u32 * 4);
1407
1408 if ((start_byte != 0) && (total_u32 > 1)) {
1409 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1410 * the other bytes in the word.
1411 */
1412 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1413 if (retval != ERROR_OK)
1414 goto error_free_buff_w;
1415 }
1416
1417 /* If end of write is not aligned, or the write is less than 4 bytes */
1418 if ((end_byte != 0) ||
1419 ((total_u32 == 1) && (total_bytes != 4))) {
1420
1421 /* Read the last word to avoid corruption during 32 bit write */
1422 int mem_offset = (total_u32-1) * 4;
1423 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1424 if (retval != ERROR_OK)
1425 goto error_free_buff_w;
1426 }
1427
1428 /* Copy the write buffer over the top of the temporary buffer */
1429 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1430
1431 /* We now have a 32 bit aligned buffer that can be written */
1432
1433 /* Read DSCR */
1434 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1435 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1436 if (retval != ERROR_OK)
1437 goto error_free_buff_w;
1438
1439 /* Set Normal access mode */
1440 dscr = (dscr & ~DSCR_MA);
1441 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1442 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1443
1444 if (arm->core_state == ARM_STATE_AARCH64) {
1445 /* Write X0 with value 'address' using write procedure */
1446 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1447 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1448 retval = dpm->instr_write_data_dcc_64(dpm,
1449 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
1450 } else {
1451 /* Write R0 with value 'address' using write procedure */
1452 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1453 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1454 dpm->instr_write_data_dcc(dpm,
1455 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
1456
1457 }
1458 /* Step 1.d - Change DCC to memory mode */
1459 dscr = dscr | DSCR_MA;
1460 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1461 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1462 if (retval != ERROR_OK)
1463 goto error_unset_dtr_w;
1464
1465
1466 /* Step 2.a - Do the write */
1467 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1468 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1469 if (retval != ERROR_OK)
1470 goto error_unset_dtr_w;
1471
1472 /* Step 3.a - Switch DTR mode back to Normal mode */
1473 dscr = (dscr & ~DSCR_MA);
1474 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1475 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1476 if (retval != ERROR_OK)
1477 goto error_unset_dtr_w;
1478
1479 /* Check for sticky abort flags in the DSCR */
1480 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1481 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1482 if (retval != ERROR_OK)
1483 goto error_free_buff_w;
1484
1485 dpm->dscr = dscr;
1486 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1487 /* Abort occurred - clear it and exit */
1488 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1489 mem_ap_write_atomic_u32(armv8->debug_ap,
1490 armv8->debug_base + CPUV8_DBG_DRCR, 1<<2);
1491 armv8_dpm_handle_exception(dpm);
1492 goto error_free_buff_w;
1493 }
1494
1495 /* Done */
1496 free(tmp_buff);
1497 return ERROR_OK;
1498
1499 error_unset_dtr_w:
1500 /* Unset DTR mode */
1501 mem_ap_read_atomic_u32(armv8->debug_ap,
1502 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1503 dscr = (dscr & ~DSCR_MA);
1504 mem_ap_write_atomic_u32(armv8->debug_ap,
1505 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1506 error_free_buff_w:
1507 LOG_ERROR("error");
1508 free(tmp_buff);
1509 return ERROR_FAIL;
1510 }
1511
/*
 * Read target memory through the APB-AP using the DCC in memory
 * access mode, following the algorithm in ARM DDI0487A.g chapter J9.1.
 *
 * The transfer operates on whole 32-bit words. Unaligned requests are
 * read into a temporary buffer and the requested byte range copied out
 * at the end. X0 (address pointer) and X1 are clobbered and marked
 * dirty; they are restored automatically on debug exit.
 *
 * NOTE(review): several setup steps accumulate status with "retval +="
 * and check once afterwards; error codes may combine, but any non-zero
 * sum still reports failure.
 */
static int aarch64_read_apb_ap_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int total_bytes = count * size;
	int total_u32;
	int start_byte = address & 0x3;
	int end_byte = (address + total_bytes) & 0x3;
	struct reg *reg;
	uint32_t dscr;
	uint8_t *tmp_buff = NULL;
	uint8_t *u8buf_ptr;
	uint32_t value;

	LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count%" PRIu32,
			address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
	/* Mark register X0, X1 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	reg = armv8_reg_current(arm, 1);
	reg->dirty = true;

	reg = armv8_reg_current(arm, 0);
	reg->dirty = true;

	/* clear any abort */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval += mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval += dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval += dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);

	}
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Optimize the read as much as we can, either way we read in a single pass */
	if ((start_byte) || (end_byte)) {
		/* The algorithm only copies 32 bit words, so the buffer
		 * should be expanded to include the words at either end.
		 * The first and last words will be read into a temp buffer
		 * to avoid corruption
		 */
		tmp_buff = malloc(total_u32 * 4);
		if (!tmp_buff)
			goto error_unset_dtr_r;

		/* use the tmp buffer to read the entire data */
		u8buf_ptr = tmp_buff;
	} else
		/* address and read length are aligned so read directly into the passed buffer */
		u8buf_ptr = buffer;

	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
	 * increments X0 by 4. */
	retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
									armv8->debug_base + CPUV8_DBG_DTRTX);
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Step 3.a - set DTR access mode back to Normal mode	*/
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
		armv8_dpm_handle_exception(dpm);
		goto error_free_buff_r;
	}

	/* check if we need to copy aligned data by applying any shift necessary */
	if (tmp_buff) {
		memcpy(buffer, tmp_buff + start_byte, total_bytes);
		free(tmp_buff);
	}

	/* Done */
	return ERROR_OK;

error_unset_dtr_r:
	/* Unset DTR mode */
	mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_MA);
	mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
error_free_buff_r:
	LOG_ERROR("error");
	free(tmp_buff);
	return ERROR_FAIL;
}
1682
1683 static int aarch64_read_phys_memory(struct target *target,
1684 target_addr_t address, uint32_t size,
1685 uint32_t count, uint8_t *buffer)
1686 {
1687 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1688 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
1689 address, size, count);
1690
1691 if (count && buffer) {
1692 /* read memory through APB-AP */
1693 retval = aarch64_mmu_modify(target, 0);
1694 if (retval != ERROR_OK)
1695 return retval;
1696 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1697 }
1698 return retval;
1699 }
1700
1701 static int aarch64_read_memory(struct target *target, target_addr_t address,
1702 uint32_t size, uint32_t count, uint8_t *buffer)
1703 {
1704 int mmu_enabled = 0;
1705 int retval;
1706
1707 /* aarch64 handles unaligned memory access */
1708 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
1709 size, count);
1710
1711 /* determine if MMU was enabled on target stop */
1712 retval = aarch64_mmu(target, &mmu_enabled);
1713 if (retval != ERROR_OK)
1714 return retval;
1715
1716 if (mmu_enabled) {
1717 retval = aarch64_check_address(target, address);
1718 if (retval != ERROR_OK)
1719 return retval;
1720 /* enable MMU as we could have disabled it for phys access */
1721 retval = aarch64_mmu_modify(target, 1);
1722 if (retval != ERROR_OK)
1723 return retval;
1724 }
1725 return aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1726 }
1727
1728 static int aarch64_write_phys_memory(struct target *target,
1729 target_addr_t address, uint32_t size,
1730 uint32_t count, const uint8_t *buffer)
1731 {
1732 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1733
1734 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
1735 size, count);
1736
1737 if (count && buffer) {
1738 /* write memory through APB-AP */
1739 retval = aarch64_mmu_modify(target, 0);
1740 if (retval != ERROR_OK)
1741 return retval;
1742 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1743 }
1744
1745 return retval;
1746 }
1747
1748 static int aarch64_write_memory(struct target *target, target_addr_t address,
1749 uint32_t size, uint32_t count, const uint8_t *buffer)
1750 {
1751 int mmu_enabled = 0;
1752 int retval;
1753
1754 /* aarch64 handles unaligned memory access */
1755 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
1756 "; count %" PRId32, address, size, count);
1757
1758 /* determine if MMU was enabled on target stop */
1759 retval = aarch64_mmu(target, &mmu_enabled);
1760 if (retval != ERROR_OK)
1761 return retval;
1762
1763 if (mmu_enabled) {
1764 retval = aarch64_check_address(target, address);
1765 if (retval != ERROR_OK)
1766 return retval;
1767 /* enable MMU as we could have disabled it for phys access */
1768 retval = aarch64_mmu_modify(target, 1);
1769 if (retval != ERROR_OK)
1770 return retval;
1771 }
1772 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1773 }
1774
1775 static int aarch64_handle_target_request(void *priv)
1776 {
1777 struct target *target = priv;
1778 struct armv8_common *armv8 = target_to_armv8(target);
1779 int retval;
1780
1781 if (!target_was_examined(target))
1782 return ERROR_OK;
1783 if (!target->dbg_msg_enabled)
1784 return ERROR_OK;
1785
1786 if (target->state == TARGET_RUNNING) {
1787 uint32_t request;
1788 uint32_t dscr;
1789 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1790 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1791
1792 /* check if we have data */
1793 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
1794 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1795 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
1796 if (retval == ERROR_OK) {
1797 target_request(target, request);
1798 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1799 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1800 }
1801 }
1802 }
1803
1804 return ERROR_OK;
1805 }
1806
1807 static int aarch64_examine_first(struct target *target)
1808 {
1809 struct aarch64_common *aarch64 = target_to_aarch64(target);
1810 struct armv8_common *armv8 = &aarch64->armv8_common;
1811 struct adiv5_dap *swjdp = armv8->arm.dap;
1812 int i;
1813 int retval = ERROR_OK;
1814 uint64_t debug, ttypr;
1815 uint32_t cpuid;
1816 uint32_t tmp0, tmp1;
1817 debug = ttypr = cpuid = 0;
1818
1819 /* We do one extra read to ensure DAP is configured,
1820 * we call ahbap_debugport_init(swjdp) instead
1821 */
1822 retval = dap_dp_init(swjdp);
1823 if (retval != ERROR_OK)
1824 return retval;
1825
1826 /* Search for the APB-AB - it is needed for access to debug registers */
1827 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
1828 if (retval != ERROR_OK) {
1829 LOG_ERROR("Could not find APB-AP for debug access");
1830 return retval;
1831 }
1832
1833 retval = mem_ap_init(armv8->debug_ap);
1834 if (retval != ERROR_OK) {
1835 LOG_ERROR("Could not initialize the APB-AP");
1836 return retval;
1837 }
1838
1839 armv8->debug_ap->memaccess_tck = 80;
1840
1841 if (!target->dbgbase_set) {
1842 uint32_t dbgbase;
1843 /* Get ROM Table base */
1844 uint32_t apid;
1845 int32_t coreidx = target->coreid;
1846 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
1847 if (retval != ERROR_OK)
1848 return retval;
1849 /* Lookup 0x15 -- Processor DAP */
1850 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
1851 &armv8->debug_base, &coreidx);
1852 if (retval != ERROR_OK)
1853 return retval;
1854 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
1855 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
1856 } else
1857 armv8->debug_base = target->dbgbase;
1858
1859 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1860 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
1861 if (retval != ERROR_OK) {
1862 LOG_DEBUG("LOCK debug access fail");
1863 return retval;
1864 }
1865
1866 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1867 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
1868 if (retval != ERROR_OK) {
1869 LOG_DEBUG("Examine %s failed", "oslock");
1870 return retval;
1871 }
1872
1873 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1874 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
1875 if (retval != ERROR_OK) {
1876 LOG_DEBUG("Examine %s failed", "CPUID");
1877 return retval;
1878 }
1879
1880 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1881 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
1882 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1883 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
1884 if (retval != ERROR_OK) {
1885 LOG_DEBUG("Examine %s failed", "Memory Model Type");
1886 return retval;
1887 }
1888 ttypr |= tmp1;
1889 ttypr = (ttypr << 32) | tmp0;
1890
1891 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1892 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
1893 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1894 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
1895 if (retval != ERROR_OK) {
1896 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
1897 return retval;
1898 }
1899 debug |= tmp1;
1900 debug = (debug << 32) | tmp0;
1901
1902 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1903 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
1904 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
1905
1906 if (target->ctibase == 0) {
1907 /* assume a v8 rom table layout */
1908 armv8->cti_base = target->ctibase = armv8->debug_base + 0x10000;
1909 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, target->ctibase);
1910 } else
1911 armv8->cti_base = target->ctibase;
1912
1913 armv8->arm.core_type = ARM_MODE_MON;
1914 retval = aarch64_dpm_setup(aarch64, debug);
1915 if (retval != ERROR_OK)
1916 return retval;
1917
1918 /* Setup Breakpoint Register Pairs */
1919 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
1920 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
1921 aarch64->brp_num_available = aarch64->brp_num;
1922 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
1923 for (i = 0; i < aarch64->brp_num; i++) {
1924 aarch64->brp_list[i].used = 0;
1925 if (i < (aarch64->brp_num-aarch64->brp_num_context))
1926 aarch64->brp_list[i].type = BRP_NORMAL;
1927 else
1928 aarch64->brp_list[i].type = BRP_CONTEXT;
1929 aarch64->brp_list[i].value = 0;
1930 aarch64->brp_list[i].control = 0;
1931 aarch64->brp_list[i].BRPn = i;
1932 }
1933
1934 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
1935
1936 target_set_examined(target);
1937 return ERROR_OK;
1938 }
1939
1940 static int aarch64_examine(struct target *target)
1941 {
1942 int retval = ERROR_OK;
1943
1944 /* don't re-probe hardware after each reset */
1945 if (!target_was_examined(target))
1946 retval = aarch64_examine_first(target);
1947
1948 /* Configure core debug access */
1949 if (retval == ERROR_OK)
1950 retval = aarch64_init_debug_access(target);
1951
1952 return retval;
1953 }
1954
/*
 * AArch64 target creation and initialization
 */
1958
1959 static int aarch64_init_target(struct command_context *cmd_ctx,
1960 struct target *target)
1961 {
1962 /* examine_first() does a bunch of this */
1963 return ERROR_OK;
1964 }
1965
1966 static int aarch64_init_arch_info(struct target *target,
1967 struct aarch64_common *aarch64, struct jtag_tap *tap)
1968 {
1969 struct armv8_common *armv8 = &aarch64->armv8_common;
1970 struct adiv5_dap *dap = armv8->arm.dap;
1971
1972 armv8->arm.dap = dap;
1973
1974 /* Setup struct aarch64_common */
1975 aarch64->common_magic = AARCH64_COMMON_MAGIC;
1976 /* tap has no dap initialized */
1977 if (!tap->dap) {
1978 tap->dap = dap_init();
1979
1980 /* Leave (only) generic DAP stuff for debugport_init() */
1981 tap->dap->tap = tap;
1982 }
1983
1984 armv8->arm.dap = tap->dap;
1985
1986 aarch64->fast_reg_read = 0;
1987
1988 /* register arch-specific functions */
1989 armv8->examine_debug_reason = NULL;
1990
1991 armv8->post_debug_entry = aarch64_post_debug_entry;
1992
1993 armv8->pre_restore_context = NULL;
1994
1995 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
1996
1997 /* REVISIT v7a setup should be in a v7a-specific routine */
1998 armv8_init_arch_info(target, armv8);
1999 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2000
2001 return ERROR_OK;
2002 }
2003
2004 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2005 {
2006 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2007
2008 return aarch64_init_arch_info(target, aarch64, target->tap);
2009 }
2010
2011 static int aarch64_mmu(struct target *target, int *enabled)
2012 {
2013 if (target->state != TARGET_HALTED) {
2014 LOG_ERROR("%s: target not halted", __func__);
2015 return ERROR_TARGET_INVALID;
2016 }
2017
2018 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2019 return ERROR_OK;
2020 }
2021
/* Translate virtual address 'virt' to a physical address in *phys using
 * the target's MMU tables.
 * NOTE(review): the final argument '1' is passed through to
 * armv8_mmu_translate_va_pa(); presumably it selects the "meminfo"/current
 * translation mode — confirm against that function's definition. */
static int aarch64_virt2phys(struct target *target, target_addr_t virt,
	target_addr_t *phys)
{
	return armv8_mmu_translate_va_pa(target, virt, phys, 1);
}
2027
2028 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2029 {
2030 struct target *target = get_current_target(CMD_CTX);
2031 struct armv8_common *armv8 = target_to_armv8(target);
2032
2033 return armv8_handle_cache_info_command(CMD_CTX,
2034 &armv8->armv8_mmu.armv8_cache);
2035 }
2036
2037
2038 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2039 {
2040 struct target *target = get_current_target(CMD_CTX);
2041 if (!target_was_examined(target)) {
2042 LOG_ERROR("target not examined yet");
2043 return ERROR_FAIL;
2044 }
2045
2046 return aarch64_init_debug_access(target);
2047 }
2048 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2049 {
2050 struct target *target = get_current_target(CMD_CTX);
2051 /* check target is an smp target */
2052 struct target_list *head;
2053 struct target *curr;
2054 head = target->head;
2055 target->smp = 0;
2056 if (head != (struct target_list *)NULL) {
2057 while (head != (struct target_list *)NULL) {
2058 curr = head->target;
2059 curr->smp = 0;
2060 head = head->next;
2061 }
2062 /* fixes the target display to the debugger */
2063 target->gdb_service->target = target;
2064 }
2065 return ERROR_OK;
2066 }
2067
2068 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2069 {
2070 struct target *target = get_current_target(CMD_CTX);
2071 struct target_list *head;
2072 struct target *curr;
2073 head = target->head;
2074 if (head != (struct target_list *)NULL) {
2075 target->smp = 1;
2076 while (head != (struct target_list *)NULL) {
2077 curr = head->target;
2078 curr->smp = 1;
2079 head = head->next;
2080 }
2081 }
2082 return ERROR_OK;
2083 }
2084
2085 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2086 {
2087 struct target *target = get_current_target(CMD_CTX);
2088 int retval = ERROR_OK;
2089 struct target_list *head;
2090 head = target->head;
2091 if (head != (struct target_list *)NULL) {
2092 if (CMD_ARGC == 1) {
2093 int coreid = 0;
2094 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2095 if (ERROR_OK != retval)
2096 return retval;
2097 target->gdb_service->core[1] = coreid;
2098
2099 }
2100 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2101 , target->gdb_service->core[1]);
2102 }
2103 return ERROR_OK;
2104 }
2105
2106 static const struct command_registration aarch64_exec_command_handlers[] = {
2107 {
2108 .name = "cache_info",
2109 .handler = aarch64_handle_cache_info_command,
2110 .mode = COMMAND_EXEC,
2111 .help = "display information about target caches",
2112 .usage = "",
2113 },
2114 {
2115 .name = "dbginit",
2116 .handler = aarch64_handle_dbginit_command,
2117 .mode = COMMAND_EXEC,
2118 .help = "Initialize core debug",
2119 .usage = "",
2120 },
2121 { .name = "smp_off",
2122 .handler = aarch64_handle_smp_off_command,
2123 .mode = COMMAND_EXEC,
2124 .help = "Stop smp handling",
2125 .usage = "",
2126 },
2127 {
2128 .name = "smp_on",
2129 .handler = aarch64_handle_smp_on_command,
2130 .mode = COMMAND_EXEC,
2131 .help = "Restart smp handling",
2132 .usage = "",
2133 },
2134 {
2135 .name = "smp_gdb",
2136 .handler = aarch64_handle_smp_gdb_command,
2137 .mode = COMMAND_EXEC,
2138 .help = "display/fix current core played to gdb",
2139 .usage = "",
2140 },
2141
2142
2143 COMMAND_REGISTRATION_DONE
2144 };
/* Top-level command chain: generic arm and armv8 commands plus the
 * aarch64-specific exec commands. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv8_command_handlers,
	},
	{
		/* NOTE(review): the group is registered as "cortex_a" although
		 * this is the aarch64 target; renaming would break existing
		 * user configs, so it is left as-is — confirm whether an
		 * "aarch64" name/alias is intended. */
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2161
/* OpenOCD target-type descriptor for "aarch64" cores; binds the generic
 * target framework to the aarch64-specific implementations in this file. */
struct target_type aarch64_target = {
	.name = "aarch64",

	/* run-state control */
	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	/* virtual-address memory access */
	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* breakpoints; watchpoints are not implemented yet (NULL) */
	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	/* physical-address memory access and MMU translation */
	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account and then change the URL to https://review.openocd.org/login/?link to reach this page again; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)