aarch64: make sure to enable HDE for all SMP PEs to be halted
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
33 static int aarch64_poll(struct target *target);
34 static int aarch64_debug_entry(struct target *target);
35 static int aarch64_restore_context(struct target *target, bool bpwp);
36 static int aarch64_set_breakpoint(struct target *target,
37 struct breakpoint *breakpoint, uint8_t matchmode);
38 static int aarch64_set_context_breakpoint(struct target *target,
39 struct breakpoint *breakpoint, uint8_t matchmode);
40 static int aarch64_set_hybrid_breakpoint(struct target *target,
41 struct breakpoint *breakpoint);
42 static int aarch64_unset_breakpoint(struct target *target,
43 struct breakpoint *breakpoint);
44 static int aarch64_mmu(struct target *target, int *enabled);
45 static int aarch64_virt2phys(struct target *target,
46 target_addr_t virt, target_addr_t *phys);
47 static int aarch64_read_apb_ap_memory(struct target *target,
48 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
49
50 static int aarch64_restore_system_control_reg(struct target *target)
51 {
52 int retval = ERROR_OK;
53
54 struct aarch64_common *aarch64 = target_to_aarch64(target);
55 struct armv8_common *armv8 = target_to_armv8(target);
56
57 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
58 aarch64->system_control_reg_curr = aarch64->system_control_reg;
59 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
60
61 switch (armv8->arm.core_mode) {
62 case ARMV8_64_EL0T:
63 case ARMV8_64_EL1T:
64 case ARMV8_64_EL1H:
65 retval = armv8->arm.msr(target, 3, /*op 0*/
66 0, 1, /* op1, op2 */
67 0, 0, /* CRn, CRm */
68 aarch64->system_control_reg);
69 if (retval != ERROR_OK)
70 return retval;
71 break;
72 case ARMV8_64_EL2T:
73 case ARMV8_64_EL2H:
74 retval = armv8->arm.msr(target, 3, /*op 0*/
75 4, 1, /* op1, op2 */
76 0, 0, /* CRn, CRm */
77 aarch64->system_control_reg);
78 if (retval != ERROR_OK)
79 return retval;
80 break;
81 case ARMV8_64_EL3H:
82 case ARMV8_64_EL3T:
83 retval = armv8->arm.msr(target, 3, /*op 0*/
84 6, 1, /* op1, op2 */
85 0, 0, /* CRn, CRm */
86 aarch64->system_control_reg);
87 if (retval != ERROR_OK)
88 return retval;
89 break;
90 default:
91 retval = armv8->arm.mcr(target, 15, 0, 0, 1, 0, aarch64->system_control_reg);
92 if (retval != ERROR_OK)
93 return retval;
94 break;
95 }
96 }
97 return retval;
98 }
99
100 /* check address before aarch64_apb read write access with mmu on
101 * remove apb predictible data abort */
102 static int aarch64_check_address(struct target *target, uint32_t address)
103 {
104 /* TODO */
105 return ERROR_OK;
106 }
107 /* modify system_control_reg in order to enable or disable mmu for :
108 * - virt2phys address conversion
109 * - read or write memory in phys or virt address */
110 static int aarch64_mmu_modify(struct target *target, int enable)
111 {
112 struct aarch64_common *aarch64 = target_to_aarch64(target);
113 struct armv8_common *armv8 = &aarch64->armv8_common;
114 int retval = ERROR_OK;
115
116 if (enable) {
117 /* if mmu enabled at target stop and mmu not enable */
118 if (!(aarch64->system_control_reg & 0x1U)) {
119 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
120 return ERROR_FAIL;
121 }
122 if (!(aarch64->system_control_reg_curr & 0x1U)) {
123 aarch64->system_control_reg_curr |= 0x1U;
124 switch (armv8->arm.core_mode) {
125 case ARMV8_64_EL0T:
126 case ARMV8_64_EL1T:
127 case ARMV8_64_EL1H:
128 retval = armv8->arm.msr(target, 3, /*op 0*/
129 0, 0, /* op1, op2 */
130 1, 0, /* CRn, CRm */
131 aarch64->system_control_reg_curr);
132 if (retval != ERROR_OK)
133 return retval;
134 break;
135 case ARMV8_64_EL2T:
136 case ARMV8_64_EL2H:
137 retval = armv8->arm.msr(target, 3, /*op 0*/
138 4, 0, /* op1, op2 */
139 1, 0, /* CRn, CRm */
140 aarch64->system_control_reg_curr);
141 if (retval != ERROR_OK)
142 return retval;
143 break;
144 case ARMV8_64_EL3H:
145 case ARMV8_64_EL3T:
146 retval = armv8->arm.msr(target, 3, /*op 0*/
147 6, 0, /* op1, op2 */
148 1, 0, /* CRn, CRm */
149 aarch64->system_control_reg_curr);
150 if (retval != ERROR_OK)
151 return retval;
152 break;
153 default:
154 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
155 }
156 }
157 } else {
158 if (aarch64->system_control_reg_curr & 0x4U) {
159 /* data cache is active */
160 aarch64->system_control_reg_curr &= ~0x4U;
161 /* flush data cache armv7 function to be called */
162 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
163 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
164 }
165 if ((aarch64->system_control_reg_curr & 0x1U)) {
166 aarch64->system_control_reg_curr &= ~0x1U;
167 switch (armv8->arm.core_mode) {
168 case ARMV8_64_EL0T:
169 case ARMV8_64_EL1T:
170 case ARMV8_64_EL1H:
171 retval = armv8->arm.msr(target, 3, /*op 0*/
172 0, 0, /* op1, op2 */
173 1, 0, /* CRn, CRm */
174 aarch64->system_control_reg_curr);
175 if (retval != ERROR_OK)
176 return retval;
177 break;
178 case ARMV8_64_EL2T:
179 case ARMV8_64_EL2H:
180 retval = armv8->arm.msr(target, 3, /*op 0*/
181 4, 0, /* op1, op2 */
182 1, 0, /* CRn, CRm */
183 aarch64->system_control_reg_curr);
184 if (retval != ERROR_OK)
185 return retval;
186 break;
187 case ARMV8_64_EL3H:
188 case ARMV8_64_EL3T:
189 retval = armv8->arm.msr(target, 3, /*op 0*/
190 6, 0, /* op1, op2 */
191 1, 0, /* CRn, CRm */
192 aarch64->system_control_reg_curr);
193 if (retval != ERROR_OK)
194 return retval;
195 break;
196 default:
197 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
198 break;
199 }
200 }
201 }
202 return retval;
203 }
204
205 /*
206 * Basic debug access, very low level assumes state is saved
207 */
208 static int aarch64_init_debug_access(struct target *target)
209 {
210 struct armv8_common *armv8 = target_to_armv8(target);
211 int retval;
212 uint32_t dummy;
213
214 LOG_DEBUG(" ");
215
216 /* Clear Sticky Power Down status Bit in PRSR to enable access to
217 the registers in the Core Power Domain */
218 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
219 armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
220 if (retval != ERROR_OK)
221 return retval;
222
223 /*
224 * Static CTI configuration:
225 * Channel 0 -> trigger outputs HALT request to PE
226 * Channel 1 -> trigger outputs Resume request to PE
227 * Gate all channel trigger events from entering the CTM
228 */
229
230 /* Enable CTI */
231 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
232 armv8->cti_base + CTI_CTR, 1);
233 /* By default, gate all channel triggers to and from the CTM */
234 if (retval == ERROR_OK)
235 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
236 armv8->cti_base + CTI_GATE, 0);
237 /* output halt requests to PE on channel 0 trigger */
238 if (retval == ERROR_OK)
239 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
240 armv8->cti_base + CTI_OUTEN0, CTI_CHNL(0));
241 /* output restart requests to PE on channel 1 trigger */
242 if (retval == ERROR_OK)
243 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
244 armv8->cti_base + CTI_OUTEN1, CTI_CHNL(1));
245 if (retval != ERROR_OK)
246 return retval;
247
248 /* Resync breakpoint registers */
249
250 /* Since this is likely called from init or reset, update target state information*/
251 return aarch64_poll(target);
252 }
253
254 /* Write to memory mapped registers directly with no cache or mmu handling */
255 static int aarch64_dap_write_memap_register_u32(struct target *target,
256 uint32_t address,
257 uint32_t value)
258 {
259 int retval;
260 struct armv8_common *armv8 = target_to_armv8(target);
261
262 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
263
264 return retval;
265 }
266
267 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
268 {
269 struct arm_dpm *dpm = &a8->armv8_common.dpm;
270 int retval;
271
272 dpm->arm = &a8->armv8_common.arm;
273 dpm->didr = debug;
274
275 retval = armv8_dpm_setup(dpm);
276 if (retval == ERROR_OK)
277 retval = armv8_dpm_initialize(dpm);
278
279 return retval;
280 }
281
282 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
283 {
284 struct armv8_common *armv8 = target_to_armv8(target);
285 uint32_t dscr;
286
287 /* Read DSCR */
288 int retval = mem_ap_read_atomic_u32(armv8->debug_ap,
289 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
290 if (ERROR_OK != retval)
291 return retval;
292
293 /* clear bitfield */
294 dscr &= ~bit_mask;
295 /* put new value */
296 dscr |= value & bit_mask;
297
298 /* write new DSCR */
299 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
300 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
301 return retval;
302 }
303
304 static struct target *get_aarch64(struct target *target, int32_t coreid)
305 {
306 struct target_list *head;
307 struct target *curr;
308
309 head = target->head;
310 while (head != (struct target_list *)NULL) {
311 curr = head->target;
312 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
313 return curr;
314 head = head->next;
315 }
316 return target;
317 }
318 static int aarch64_halt(struct target *target);
319
320 static int aarch64_halt_smp(struct target *target)
321 {
322 int retval = ERROR_OK;
323 struct target_list *head = target->head;
324
325 while (head != (struct target_list *)NULL) {
326 struct target *curr = head->target;
327 struct armv8_common *armv8 = target_to_armv8(curr);
328
329 /* open the gate for channel 0 to let HALT requests pass to the CTM */
330 if (curr->smp) {
331 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
332 armv8->cti_base + CTI_GATE, CTI_CHNL(0));
333 if (retval == ERROR_OK)
334 retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
335 }
336 if (retval != ERROR_OK)
337 break;
338
339 head = head->next;
340 }
341
342 /* halt the target PE */
343 if (retval == ERROR_OK)
344 retval = aarch64_halt(target);
345
346 return retval;
347 }
348
349 static int update_halt_gdb(struct target *target)
350 {
351 int retval = 0;
352 if (target->gdb_service && target->gdb_service->core[0] == -1) {
353 target->gdb_service->target = target;
354 target->gdb_service->core[0] = target->coreid;
355 retval += aarch64_halt_smp(target);
356 }
357 return retval;
358 }
359
/*
 * AArch64 run control
 */
363
/*
 * Poll DSCR and update target->state, driving debug entry and the
 * HALTED/DEBUG_HALTED event callbacks when a halt transition is seen.
 */
static int aarch64_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	enum target_state prev_target_state = target->state;
	/* toggling to another core is done by gdb as follows:
	 *   maint packet J core_id
	 *   continue
	 * the next poll triggers a halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_aarch64(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache DSCR for aarch64_debug_entry() */
	aarch64->cpudbg_dscr = dscr;

	/* run-mode 0x3 indicates the PE is halted in debug state */
	if (DSCR_RUN_MODE(dscr) == 0x3) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					/* also halt the other cores of the group */
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
429
/*
 * Halt this PE: enable halting debug mode (DSCR.HDE), pulse CTI channel 0
 * (wired to output a halt request in aarch64_init_debug_access()), and
 * wait up to 1 s for DSCR to report the halted state.
 */
static int aarch64_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv8_common *armv8 = target_to_armv8(target);

	/*
	 * enable HDE (halting debug mode) so the halt request takes effect
	 */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
		armv8->cti_base + CTI_APPPULSE, CTI_CHNL(0));
	if (retval != ERROR_OK)
		return retval;

	/* poll DSCR until the halt is reflected, with a 1 s timeout */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCRV8_HALT_MASK) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
467
/*
 * Prepare the core for resuming: select the resume PC (current PC when
 * `current` is non-zero, otherwise *address), write it back, restore SCTLR
 * and all dirty registers, and invalidate the cached register state.
 * Does NOT restart the PE itself; see aarch64_internal_restart().
 * On return *address holds the PC the core will resume at.
 */
static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		/* A32: PC must be word aligned */
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		/* A64: PC must be word aligned (64-bit address space) */
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%16" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	/* return to the mode the core was halted in */
	armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return retval;
}
529
/*
 * Restart the PE.  Acknowledges the pending halt trigger, opens the CTI
 * gate for the restart channel (1), and — unless slave_pe is set — pulses
 * channel 1 to issue the restart request and waits for it to take effect.
 * Slave PEs of an SMP group rely on the master's pulse via the CTM.
 */
static int aarch64_internal_restart(struct target *target, bool slave_pe)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t dscr;
	/*
	 * Restart core and wait for it to be started. Clear ITRen and sticky
	 * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* make sure to acknowledge the halt event before resuming */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
		armv8->cti_base + CTI_INACK, CTI_TRIG(HALT));

	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs
	 */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_GATE, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	if (!slave_pe) {
		/* trigger an event on channel 1, generates a restart request to the PE */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_APPPULSE, CTI_CHNL(1));
		if (retval != ERROR_OK)
			return retval;

		long long then = timeval_ms();
		for (;; ) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			if (retval != ERROR_OK)
				return retval;
			/* NOTE(review): this polls DSCR_HDE as the "restarted"
			 * indication — confirm against the v8-A debug spec
			 * (EDSCR.STATUS) that this is the intended bit */
			if ((dscr & DSCR_HDE) != 0)
				break;
			if (timeval_ms() > then + 1000) {
				LOG_ERROR("Timeout waiting for resume");
				return ERROR_FAIL;
			}
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
596
597 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
598 {
599 int retval = 0;
600 struct target_list *head;
601 struct target *curr;
602 uint64_t address;
603 head = target->head;
604 while (head != (struct target_list *)NULL) {
605 curr = head->target;
606 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
607 /* resume current address , not in step mode */
608 retval += aarch64_internal_restore(curr, 1, &address,
609 handle_breakpoints, 0);
610 retval += aarch64_internal_restart(curr, true);
611 }
612 head = head->next;
613
614 }
615 return retval;
616 }
617
618 static int aarch64_resume(struct target *target, int current,
619 target_addr_t address, int handle_breakpoints, int debug_execution)
620 {
621 int retval = 0;
622 uint64_t addr = address;
623
624 /* dummy resume for smp toggle in order to reduce gdb impact */
625 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
626 /* simulate a start and halt of target */
627 target->gdb_service->target = NULL;
628 target->gdb_service->core[0] = target->gdb_service->core[1];
629 /* fake resume at next poll we play the target core[1], see poll*/
630 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
631 return 0;
632 }
633 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
634 debug_execution);
635 if (target->smp) {
636 target->gdb_service->core[0] = -1;
637 retval = aarch64_restore_smp(target, handle_breakpoints);
638 if (retval != ERROR_OK)
639 return retval;
640 }
641 aarch64_internal_restart(target, false);
642
643 if (!debug_execution) {
644 target->state = TARGET_RUNNING;
645 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
646 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
647 } else {
648 target->state = TARGET_DEBUG_RUNNING;
649 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
650 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
651 }
652
653 return ERROR_OK;
654 }
655
/*
 * Common debug-state entry: select opcode/register access for the halted
 * state (AArch32 vs AArch64), clear sticky errors, decode the debug
 * reason from the cached DSCR, report WFAR on watchpoint hits, read the
 * current registers and run the target's post-entry hook.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), aarch64->cpudbg_dscr);

	/* cpudbg_dscr was cached by aarch64_poll() */
	dpm->dscr = aarch64->cpudbg_dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		/* WFAR is 64 bit wide: read the high word, then the low word */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_WFAR1,
			&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_WFAR0,
			&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	/* target-specific hook, e.g. aarch64_post_debug_entry() */
	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
708
709 static int aarch64_post_debug_entry(struct target *target)
710 {
711 struct aarch64_common *aarch64 = target_to_aarch64(target);
712 struct armv8_common *armv8 = &aarch64->armv8_common;
713 int retval;
714
715 /* clear sticky errors */
716 mem_ap_write_atomic_u32(armv8->debug_ap,
717 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
718
719 switch (armv8->arm.core_mode) {
720 case ARMV8_64_EL0T:
721 armv8_dpm_modeswitch(&armv8->dpm, ARMV8_64_EL1H);
722 /* fall through */
723 case ARMV8_64_EL1T:
724 case ARMV8_64_EL1H:
725 retval = armv8->arm.mrs(target, 3, /*op 0*/
726 0, 0, /* op1, op2 */
727 1, 0, /* CRn, CRm */
728 &aarch64->system_control_reg);
729 if (retval != ERROR_OK)
730 return retval;
731 break;
732 case ARMV8_64_EL2T:
733 case ARMV8_64_EL2H:
734 retval = armv8->arm.mrs(target, 3, /*op 0*/
735 4, 0, /* op1, op2 */
736 1, 0, /* CRn, CRm */
737 &aarch64->system_control_reg);
738 if (retval != ERROR_OK)
739 return retval;
740 break;
741 case ARMV8_64_EL3H:
742 case ARMV8_64_EL3T:
743 retval = armv8->arm.mrs(target, 3, /*op 0*/
744 6, 0, /* op1, op2 */
745 1, 0, /* CRn, CRm */
746 &aarch64->system_control_reg);
747 if (retval != ERROR_OK)
748 return retval;
749 break;
750
751 case ARM_MODE_SVC:
752 retval = armv8->arm.mrc(target, 15, 0, 0, 1, 0, &aarch64->system_control_reg);
753 if (retval != ERROR_OK)
754 return retval;
755 break;
756
757 default:
758 LOG_INFO("cannot read system control register in this mode");
759 break;
760 }
761
762 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
763
764 LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
765 aarch64->system_control_reg_curr = aarch64->system_control_reg;
766
767 if (armv8->armv8_mmu.armv8_cache.info == -1) {
768 armv8_identify_cache(armv8);
769 armv8_read_mpidr(armv8);
770 }
771
772 armv8->armv8_mmu.mmu_enabled =
773 (aarch64->system_control_reg & 0x1U) ? 1 : 0;
774 armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
775 (aarch64->system_control_reg & 0x4U) ? 1 : 0;
776 armv8->armv8_mmu.armv8_cache.i_cache_enabled =
777 (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
778 aarch64->curr_mode = armv8->arm.core_mode;
779 return ERROR_OK;
780 }
781
782 static int aarch64_step(struct target *target, int current, target_addr_t address,
783 int handle_breakpoints)
784 {
785 struct armv8_common *armv8 = target_to_armv8(target);
786 int retval;
787 uint32_t edecr;
788
789 if (target->state != TARGET_HALTED) {
790 LOG_WARNING("target not halted");
791 return ERROR_TARGET_NOT_HALTED;
792 }
793
794 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
795 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
796 if (retval != ERROR_OK)
797 return retval;
798
799 /* make sure EDECR.SS is not set when restoring the register */
800 edecr &= ~0x4;
801
802 /* set EDECR.SS to enter hardware step mode */
803 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
804 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
805 if (retval != ERROR_OK)
806 return retval;
807
808 /* disable interrupts while stepping */
809 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
810 if (retval != ERROR_OK)
811 return ERROR_OK;
812
813 /* resume the target */
814 retval = aarch64_resume(target, current, address, 0, 0);
815 if (retval != ERROR_OK)
816 return retval;
817
818 long long then = timeval_ms();
819 while (target->state != TARGET_HALTED) {
820 retval = aarch64_poll(target);
821 if (retval != ERROR_OK)
822 return retval;
823 if (timeval_ms() > then + 1000) {
824 LOG_ERROR("timeout waiting for target halt");
825 return ERROR_FAIL;
826 }
827 }
828
829 /* restore EDECR */
830 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
831 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
832 if (retval != ERROR_OK)
833 return retval;
834
835 /* restore interrupts */
836 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
837 if (retval != ERROR_OK)
838 return ERROR_OK;
839
840 return ERROR_OK;
841 }
842
843 static int aarch64_restore_context(struct target *target, bool bpwp)
844 {
845 struct armv8_common *armv8 = target_to_armv8(target);
846
847 LOG_DEBUG(" ");
848
849 if (armv8->pre_restore_context)
850 armv8->pre_restore_context(target);
851
852 return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
853
854 }
855
/*
 * AArch64 breakpoint and watchpoint functions
 */
859
860 /* Setup hardware Breakpoint Register Pair */
861 static int aarch64_set_breakpoint(struct target *target,
862 struct breakpoint *breakpoint, uint8_t matchmode)
863 {
864 int retval;
865 int brp_i = 0;
866 uint32_t control;
867 uint8_t byte_addr_select = 0x0F;
868 struct aarch64_common *aarch64 = target_to_aarch64(target);
869 struct armv8_common *armv8 = &aarch64->armv8_common;
870 struct aarch64_brp *brp_list = aarch64->brp_list;
871
872 if (breakpoint->set) {
873 LOG_WARNING("breakpoint already set");
874 return ERROR_OK;
875 }
876
877 if (breakpoint->type == BKPT_HARD) {
878 int64_t bpt_value;
879 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
880 brp_i++;
881 if (brp_i >= aarch64->brp_num) {
882 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
883 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
884 }
885 breakpoint->set = brp_i + 1;
886 if (breakpoint->length == 2)
887 byte_addr_select = (3 << (breakpoint->address & 0x02));
888 control = ((matchmode & 0x7) << 20)
889 | (1 << 13)
890 | (byte_addr_select << 5)
891 | (3 << 1) | 1;
892 brp_list[brp_i].used = 1;
893 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
894 brp_list[brp_i].control = control;
895 bpt_value = brp_list[brp_i].value;
896
897 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
898 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
899 (uint32_t)(bpt_value & 0xFFFFFFFF));
900 if (retval != ERROR_OK)
901 return retval;
902 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
903 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
904 (uint32_t)(bpt_value >> 32));
905 if (retval != ERROR_OK)
906 return retval;
907
908 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
909 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
910 brp_list[brp_i].control);
911 if (retval != ERROR_OK)
912 return retval;
913 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
914 brp_list[brp_i].control,
915 brp_list[brp_i].value);
916
917 } else if (breakpoint->type == BKPT_SOFT) {
918 uint8_t code[4];
919
920 buf_set_u32(code, 0, 32, ARMV8_HLT(0x11));
921 retval = target_read_memory(target,
922 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
923 breakpoint->length, 1,
924 breakpoint->orig_instr);
925 if (retval != ERROR_OK)
926 return retval;
927
928 armv8_cache_d_inner_flush_virt(armv8,
929 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
930 breakpoint->length);
931
932 retval = target_write_memory(target,
933 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
934 breakpoint->length, 1, code);
935 if (retval != ERROR_OK)
936 return retval;
937
938 armv8_cache_d_inner_flush_virt(armv8,
939 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
940 breakpoint->length);
941
942 armv8_cache_i_inner_inval_virt(armv8,
943 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
944 breakpoint->length);
945
946 breakpoint->set = 0x11; /* Any nice value but 0 */
947 }
948
949 /* Ensure that halting debug mode is enable */
950 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
951 if (retval != ERROR_OK) {
952 LOG_DEBUG("Failed to set DSCR.HDE");
953 return retval;
954 }
955
956 return ERROR_OK;
957 }
958
959 static int aarch64_set_context_breakpoint(struct target *target,
960 struct breakpoint *breakpoint, uint8_t matchmode)
961 {
962 int retval = ERROR_FAIL;
963 int brp_i = 0;
964 uint32_t control;
965 uint8_t byte_addr_select = 0x0F;
966 struct aarch64_common *aarch64 = target_to_aarch64(target);
967 struct armv8_common *armv8 = &aarch64->armv8_common;
968 struct aarch64_brp *brp_list = aarch64->brp_list;
969
970 if (breakpoint->set) {
971 LOG_WARNING("breakpoint already set");
972 return retval;
973 }
974 /*check available context BRPs*/
975 while ((brp_list[brp_i].used ||
976 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
977 brp_i++;
978
979 if (brp_i >= aarch64->brp_num) {
980 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
981 return ERROR_FAIL;
982 }
983
984 breakpoint->set = brp_i + 1;
985 control = ((matchmode & 0x7) << 20)
986 | (1 << 13)
987 | (byte_addr_select << 5)
988 | (3 << 1) | 1;
989 brp_list[brp_i].used = 1;
990 brp_list[brp_i].value = (breakpoint->asid);
991 brp_list[brp_i].control = control;
992 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
993 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
994 brp_list[brp_i].value);
995 if (retval != ERROR_OK)
996 return retval;
997 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
998 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
999 brp_list[brp_i].control);
1000 if (retval != ERROR_OK)
1001 return retval;
1002 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1003 brp_list[brp_i].control,
1004 brp_list[brp_i].value);
1005 return ERROR_OK;
1006
1007 }
1008
1009 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1010 {
1011 int retval = ERROR_FAIL;
1012 int brp_1 = 0; /* holds the contextID pair */
1013 int brp_2 = 0; /* holds the IVA pair */
1014 uint32_t control_CTX, control_IVA;
1015 uint8_t CTX_byte_addr_select = 0x0F;
1016 uint8_t IVA_byte_addr_select = 0x0F;
1017 uint8_t CTX_machmode = 0x03;
1018 uint8_t IVA_machmode = 0x01;
1019 struct aarch64_common *aarch64 = target_to_aarch64(target);
1020 struct armv8_common *armv8 = &aarch64->armv8_common;
1021 struct aarch64_brp *brp_list = aarch64->brp_list;
1022
1023 if (breakpoint->set) {
1024 LOG_WARNING("breakpoint already set");
1025 return retval;
1026 }
1027 /*check available context BRPs*/
1028 while ((brp_list[brp_1].used ||
1029 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1030 brp_1++;
1031
1032 printf("brp(CTX) found num: %d\n", brp_1);
1033 if (brp_1 >= aarch64->brp_num) {
1034 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1035 return ERROR_FAIL;
1036 }
1037
1038 while ((brp_list[brp_2].used ||
1039 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1040 brp_2++;
1041
1042 printf("brp(IVA) found num: %d\n", brp_2);
1043 if (brp_2 >= aarch64->brp_num) {
1044 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1045 return ERROR_FAIL;
1046 }
1047
1048 breakpoint->set = brp_1 + 1;
1049 breakpoint->linked_BRP = brp_2;
1050 control_CTX = ((CTX_machmode & 0x7) << 20)
1051 | (brp_2 << 16)
1052 | (0 << 14)
1053 | (CTX_byte_addr_select << 5)
1054 | (3 << 1) | 1;
1055 brp_list[brp_1].used = 1;
1056 brp_list[brp_1].value = (breakpoint->asid);
1057 brp_list[brp_1].control = control_CTX;
1058 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1059 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1060 brp_list[brp_1].value);
1061 if (retval != ERROR_OK)
1062 return retval;
1063 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1064 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1065 brp_list[brp_1].control);
1066 if (retval != ERROR_OK)
1067 return retval;
1068
1069 control_IVA = ((IVA_machmode & 0x7) << 20)
1070 | (brp_1 << 16)
1071 | (1 << 13)
1072 | (IVA_byte_addr_select << 5)
1073 | (3 << 1) | 1;
1074 brp_list[brp_2].used = 1;
1075 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1076 brp_list[brp_2].control = control_IVA;
1077 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1078 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1079 brp_list[brp_2].value & 0xFFFFFFFF);
1080 if (retval != ERROR_OK)
1081 return retval;
1082 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1083 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1084 brp_list[brp_2].value >> 32);
1085 if (retval != ERROR_OK)
1086 return retval;
1087 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1088 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1089 brp_list[brp_2].control);
1090 if (retval != ERROR_OK)
1091 return retval;
1092
1093 return ERROR_OK;
1094 }
1095
/*
 * Remove a previously set breakpoint.
 *
 * For BKPT_HARD breakpoints the used BRP(s) are cleared in the cached
 * brp_list and the corresponding BCR/BVR debug registers are zeroed.
 * A breakpoint with both address and asid non-zero is a hybrid breakpoint
 * occupying two linked BRPs (the context pair recorded in breakpoint->set,
 * the IVA pair in breakpoint->linked_BRP); otherwise a single BRP is freed.
 * For soft breakpoints the original instruction is written back to memory
 * and the data/instruction caches are maintained around the write.
 *
 * Returns ERROR_OK on success (including "nothing to do" cases) or the
 * first failing register/memory access error code.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			/* hybrid breakpoint: free both linked BRPs */
			int brp_i = breakpoint->set - 1;	/* context pair */
			int brp_j = breakpoint->linked_BRP;	/* IVA pair */
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* disable first (BCR), then clear both BVR halves */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* simple hardware breakpoint: free the single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		/* flush D-cache before the write so the restored instruction
		 * lands in coherent memory */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* flush D-cache and invalidate I-cache so the core fetches
		 * the restored instruction, not the stale BKPT */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1228
1229 static int aarch64_add_breakpoint(struct target *target,
1230 struct breakpoint *breakpoint)
1231 {
1232 struct aarch64_common *aarch64 = target_to_aarch64(target);
1233
1234 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1235 LOG_INFO("no hardware breakpoint available");
1236 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1237 }
1238
1239 if (breakpoint->type == BKPT_HARD)
1240 aarch64->brp_num_available--;
1241
1242 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1243 }
1244
1245 static int aarch64_add_context_breakpoint(struct target *target,
1246 struct breakpoint *breakpoint)
1247 {
1248 struct aarch64_common *aarch64 = target_to_aarch64(target);
1249
1250 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1251 LOG_INFO("no hardware breakpoint available");
1252 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1253 }
1254
1255 if (breakpoint->type == BKPT_HARD)
1256 aarch64->brp_num_available--;
1257
1258 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1259 }
1260
1261 static int aarch64_add_hybrid_breakpoint(struct target *target,
1262 struct breakpoint *breakpoint)
1263 {
1264 struct aarch64_common *aarch64 = target_to_aarch64(target);
1265
1266 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1267 LOG_INFO("no hardware breakpoint available");
1268 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1269 }
1270
1271 if (breakpoint->type == BKPT_HARD)
1272 aarch64->brp_num_available--;
1273
1274 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1275 }
1276
1277
1278 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1279 {
1280 struct aarch64_common *aarch64 = target_to_aarch64(target);
1281
1282 #if 0
1283 /* It is perfectly possible to remove breakpoints while the target is running */
1284 if (target->state != TARGET_HALTED) {
1285 LOG_WARNING("target not halted");
1286 return ERROR_TARGET_NOT_HALTED;
1287 }
1288 #endif
1289
1290 if (breakpoint->set) {
1291 aarch64_unset_breakpoint(target, breakpoint);
1292 if (breakpoint->type == BKPT_HARD)
1293 aarch64->brp_num_available++;
1294 }
1295
1296 return ERROR_OK;
1297 }
1298
1299 /*
1300 * Cortex-A8 Reset functions
1301 */
1302
1303 static int aarch64_assert_reset(struct target *target)
1304 {
1305 struct armv8_common *armv8 = target_to_armv8(target);
1306
1307 LOG_DEBUG(" ");
1308
1309 /* FIXME when halt is requested, make it work somehow... */
1310
1311 /* Issue some kind of warm reset. */
1312 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1313 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1314 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1315 /* REVISIT handle "pulls" cases, if there's
1316 * hardware that needs them to work.
1317 */
1318 jtag_add_reset(0, 1);
1319 } else {
1320 LOG_ERROR("%s: how to reset?", target_name(target));
1321 return ERROR_FAIL;
1322 }
1323
1324 /* registers are now invalid */
1325 register_cache_invalidate(armv8->arm.core_cache);
1326
1327 target->state = TARGET_RESET;
1328
1329 return ERROR_OK;
1330 }
1331
1332 static int aarch64_deassert_reset(struct target *target)
1333 {
1334 int retval;
1335
1336 LOG_DEBUG(" ");
1337
1338 /* be certain SRST is off */
1339 jtag_add_reset(0, 0);
1340
1341 retval = aarch64_poll(target);
1342 if (retval != ERROR_OK)
1343 return retval;
1344
1345 if (target->reset_halt) {
1346 if (target->state != TARGET_HALTED) {
1347 LOG_WARNING("%s: ran after reset and before halt ...",
1348 target_name(target));
1349 retval = target_halt(target);
1350 if (retval != ERROR_OK)
1351 return retval;
1352 }
1353 }
1354
1355 return ERROR_OK;
1356 }
1357
1358 static int aarch64_write_apb_ap_memory(struct target *target,
1359 uint64_t address, uint32_t size,
1360 uint32_t count, const uint8_t *buffer)
1361 {
1362 /* write memory through APB-AP */
1363 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1364 struct armv8_common *armv8 = target_to_armv8(target);
1365 struct arm_dpm *dpm = &armv8->dpm;
1366 struct arm *arm = &armv8->arm;
1367 int total_bytes = count * size;
1368 int total_u32;
1369 int start_byte = address & 0x3;
1370 int end_byte = (address + total_bytes) & 0x3;
1371 struct reg *reg;
1372 uint32_t dscr;
1373 uint8_t *tmp_buff = NULL;
1374
1375 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count%" PRIu32,
1376 address, size, count);
1377 if (target->state != TARGET_HALTED) {
1378 LOG_WARNING("target not halted");
1379 return ERROR_TARGET_NOT_HALTED;
1380 }
1381
1382 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1383
1384 /* Mark register R0 as dirty, as it will be used
1385 * for transferring the data.
1386 * It will be restored automatically when exiting
1387 * debug mode
1388 */
1389 reg = armv8_reg_current(arm, 1);
1390 reg->dirty = true;
1391
1392 reg = armv8_reg_current(arm, 0);
1393 reg->dirty = true;
1394
1395 /* clear any abort */
1396 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1397 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1398 if (retval != ERROR_OK)
1399 return retval;
1400
1401
1402 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1403
1404 /* The algorithm only copies 32 bit words, so the buffer
1405 * should be expanded to include the words at either end.
1406 * The first and last words will be read first to avoid
1407 * corruption if needed.
1408 */
1409 tmp_buff = malloc(total_u32 * 4);
1410
1411 if ((start_byte != 0) && (total_u32 > 1)) {
1412 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1413 * the other bytes in the word.
1414 */
1415 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1416 if (retval != ERROR_OK)
1417 goto error_free_buff_w;
1418 }
1419
1420 /* If end of write is not aligned, or the write is less than 4 bytes */
1421 if ((end_byte != 0) ||
1422 ((total_u32 == 1) && (total_bytes != 4))) {
1423
1424 /* Read the last word to avoid corruption during 32 bit write */
1425 int mem_offset = (total_u32-1) * 4;
1426 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1427 if (retval != ERROR_OK)
1428 goto error_free_buff_w;
1429 }
1430
1431 /* Copy the write buffer over the top of the temporary buffer */
1432 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1433
1434 /* We now have a 32 bit aligned buffer that can be written */
1435
1436 /* Read DSCR */
1437 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1438 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1439 if (retval != ERROR_OK)
1440 goto error_free_buff_w;
1441
1442 /* Set Normal access mode */
1443 dscr = (dscr & ~DSCR_MA);
1444 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1445 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1446
1447 if (arm->core_state == ARM_STATE_AARCH64) {
1448 /* Write X0 with value 'address' using write procedure */
1449 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1450 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1451 retval = dpm->instr_write_data_dcc_64(dpm,
1452 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
1453 } else {
1454 /* Write R0 with value 'address' using write procedure */
1455 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1456 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1457 dpm->instr_write_data_dcc(dpm,
1458 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
1459
1460 }
1461 /* Step 1.d - Change DCC to memory mode */
1462 dscr = dscr | DSCR_MA;
1463 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1464 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1465 if (retval != ERROR_OK)
1466 goto error_unset_dtr_w;
1467
1468
1469 /* Step 2.a - Do the write */
1470 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1471 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1472 if (retval != ERROR_OK)
1473 goto error_unset_dtr_w;
1474
1475 /* Step 3.a - Switch DTR mode back to Normal mode */
1476 dscr = (dscr & ~DSCR_MA);
1477 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1478 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1479 if (retval != ERROR_OK)
1480 goto error_unset_dtr_w;
1481
1482 /* Check for sticky abort flags in the DSCR */
1483 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1484 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1485 if (retval != ERROR_OK)
1486 goto error_free_buff_w;
1487
1488 dpm->dscr = dscr;
1489 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1490 /* Abort occurred - clear it and exit */
1491 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1492 mem_ap_write_atomic_u32(armv8->debug_ap,
1493 armv8->debug_base + CPUV8_DBG_DRCR, 1<<2);
1494 armv8_dpm_handle_exception(dpm);
1495 goto error_free_buff_w;
1496 }
1497
1498 /* Done */
1499 free(tmp_buff);
1500 return ERROR_OK;
1501
1502 error_unset_dtr_w:
1503 /* Unset DTR mode */
1504 mem_ap_read_atomic_u32(armv8->debug_ap,
1505 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1506 dscr = (dscr & ~DSCR_MA);
1507 mem_ap_write_atomic_u32(armv8->debug_ap,
1508 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1509 error_free_buff_w:
1510 LOG_ERROR("error");
1511 free(tmp_buff);
1512 return ERROR_FAIL;
1513 }
1514
/*
 * Read target memory through the APB-AP using the DCC memory-access
 * mode (algorithm from ARM DDI0487A.g, chapter J9.1).
 *
 * The transfer is done in aligned 32-bit words; unaligned requests go
 * through a temporary buffer and the requested bytes are copied out at
 * the end.  X0/X1 are clobbered on the core and marked dirty so they are
 * restored on debug exit.
 *
 * Requires the target to be halted.  Returns ERROR_OK on success,
 * ERROR_TARGET_NOT_HALTED, or ERROR_FAIL on any access/abort error.
 */
static int aarch64_read_apb_ap_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int total_bytes = count * size;
	int total_u32;
	int start_byte = address & 0x3;
	int end_byte = (address + total_bytes) & 0x3;
	struct reg *reg;
	uint32_t dscr;
	uint8_t *tmp_buff = NULL;
	uint8_t *u8buf_ptr;
	uint32_t value;

	LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count%" PRIu32,
		address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
	/* Mark register X0, X1 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	reg = armv8_reg_current(arm, 1);
	reg->dirty = true;

	reg = armv8_reg_current(arm, 0);
	reg->dirty = true;

	/* clear any abort */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval += mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval += dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval += dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);

	}
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Optimize the read as much as we can, either way we read in a single pass */
	if ((start_byte) || (end_byte)) {
		/* The algorithm only copies 32 bit words, so the buffer
		 * should be expanded to include the words at either end.
		 * The first and last words will be read into a temp buffer
		 * to avoid corruption
		 */
		tmp_buff = malloc(total_u32 * 4);
		if (!tmp_buff)
			goto error_unset_dtr_r;

		/* use the tmp buffer to read the entire data */
		u8buf_ptr = tmp_buff;
	} else
		/* address and read length are aligned so read directly into the passed buffer */
		u8buf_ptr = buffer;

	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
	 * increments X0 by 4. */
	retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
									armv8->debug_base + CPUV8_DBG_DTRTX);
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Step 3.a - set DTR access mode back to Normal mode	*/
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
		armv8_dpm_handle_exception(dpm);
		goto error_free_buff_r;
	}

	/* check if we need to copy aligned data by applying any shift necessary */
	if (tmp_buff) {
		memcpy(buffer, tmp_buff + start_byte, total_bytes);
		free(tmp_buff);
	}

	/* Done */
	return ERROR_OK;

error_unset_dtr_r:
	/* Unset DTR mode */
	mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_MA);
	mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
error_free_buff_r:
	LOG_ERROR("error");
	free(tmp_buff);
	return ERROR_FAIL;
}
1685
1686 static int aarch64_read_phys_memory(struct target *target,
1687 target_addr_t address, uint32_t size,
1688 uint32_t count, uint8_t *buffer)
1689 {
1690 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1691 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
1692 address, size, count);
1693
1694 if (count && buffer) {
1695 /* read memory through APB-AP */
1696 retval = aarch64_mmu_modify(target, 0);
1697 if (retval != ERROR_OK)
1698 return retval;
1699 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1700 }
1701 return retval;
1702 }
1703
1704 static int aarch64_read_memory(struct target *target, target_addr_t address,
1705 uint32_t size, uint32_t count, uint8_t *buffer)
1706 {
1707 int mmu_enabled = 0;
1708 int retval;
1709
1710 /* aarch64 handles unaligned memory access */
1711 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
1712 size, count);
1713
1714 /* determine if MMU was enabled on target stop */
1715 retval = aarch64_mmu(target, &mmu_enabled);
1716 if (retval != ERROR_OK)
1717 return retval;
1718
1719 if (mmu_enabled) {
1720 retval = aarch64_check_address(target, address);
1721 if (retval != ERROR_OK)
1722 return retval;
1723 /* enable MMU as we could have disabled it for phys access */
1724 retval = aarch64_mmu_modify(target, 1);
1725 if (retval != ERROR_OK)
1726 return retval;
1727 }
1728 return aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1729 }
1730
1731 static int aarch64_write_phys_memory(struct target *target,
1732 target_addr_t address, uint32_t size,
1733 uint32_t count, const uint8_t *buffer)
1734 {
1735 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1736
1737 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
1738 size, count);
1739
1740 if (count && buffer) {
1741 /* write memory through APB-AP */
1742 retval = aarch64_mmu_modify(target, 0);
1743 if (retval != ERROR_OK)
1744 return retval;
1745 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1746 }
1747
1748 return retval;
1749 }
1750
1751 static int aarch64_write_memory(struct target *target, target_addr_t address,
1752 uint32_t size, uint32_t count, const uint8_t *buffer)
1753 {
1754 int mmu_enabled = 0;
1755 int retval;
1756
1757 /* aarch64 handles unaligned memory access */
1758 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
1759 "; count %" PRId32, address, size, count);
1760
1761 /* determine if MMU was enabled on target stop */
1762 retval = aarch64_mmu(target, &mmu_enabled);
1763 if (retval != ERROR_OK)
1764 return retval;
1765
1766 if (mmu_enabled) {
1767 retval = aarch64_check_address(target, address);
1768 if (retval != ERROR_OK)
1769 return retval;
1770 /* enable MMU as we could have disabled it for phys access */
1771 retval = aarch64_mmu_modify(target, 1);
1772 if (retval != ERROR_OK)
1773 return retval;
1774 }
1775 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1776 }
1777
1778 static int aarch64_handle_target_request(void *priv)
1779 {
1780 struct target *target = priv;
1781 struct armv8_common *armv8 = target_to_armv8(target);
1782 int retval;
1783
1784 if (!target_was_examined(target))
1785 return ERROR_OK;
1786 if (!target->dbg_msg_enabled)
1787 return ERROR_OK;
1788
1789 if (target->state == TARGET_RUNNING) {
1790 uint32_t request;
1791 uint32_t dscr;
1792 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1793 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1794
1795 /* check if we have data */
1796 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
1797 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1798 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
1799 if (retval == ERROR_OK) {
1800 target_request(target, request);
1801 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1802 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1803 }
1804 }
1805 }
1806
1807 return ERROR_OK;
1808 }
1809
1810 static int aarch64_examine_first(struct target *target)
1811 {
1812 struct aarch64_common *aarch64 = target_to_aarch64(target);
1813 struct armv8_common *armv8 = &aarch64->armv8_common;
1814 struct adiv5_dap *swjdp = armv8->arm.dap;
1815 int i;
1816 int retval = ERROR_OK;
1817 uint64_t debug, ttypr;
1818 uint32_t cpuid;
1819 uint32_t tmp0, tmp1;
1820 debug = ttypr = cpuid = 0;
1821
1822 /* We do one extra read to ensure DAP is configured,
1823 * we call ahbap_debugport_init(swjdp) instead
1824 */
1825 retval = dap_dp_init(swjdp);
1826 if (retval != ERROR_OK)
1827 return retval;
1828
1829 /* Search for the APB-AB - it is needed for access to debug registers */
1830 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
1831 if (retval != ERROR_OK) {
1832 LOG_ERROR("Could not find APB-AP for debug access");
1833 return retval;
1834 }
1835
1836 retval = mem_ap_init(armv8->debug_ap);
1837 if (retval != ERROR_OK) {
1838 LOG_ERROR("Could not initialize the APB-AP");
1839 return retval;
1840 }
1841
1842 armv8->debug_ap->memaccess_tck = 80;
1843
1844 if (!target->dbgbase_set) {
1845 uint32_t dbgbase;
1846 /* Get ROM Table base */
1847 uint32_t apid;
1848 int32_t coreidx = target->coreid;
1849 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
1850 if (retval != ERROR_OK)
1851 return retval;
1852 /* Lookup 0x15 -- Processor DAP */
1853 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
1854 &armv8->debug_base, &coreidx);
1855 if (retval != ERROR_OK)
1856 return retval;
1857 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
1858 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
1859 } else
1860 armv8->debug_base = target->dbgbase;
1861
1862 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1863 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
1864 if (retval != ERROR_OK) {
1865 LOG_DEBUG("LOCK debug access fail");
1866 return retval;
1867 }
1868
1869 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1870 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
1871 if (retval != ERROR_OK) {
1872 LOG_DEBUG("Examine %s failed", "oslock");
1873 return retval;
1874 }
1875
1876 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1877 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
1878 if (retval != ERROR_OK) {
1879 LOG_DEBUG("Examine %s failed", "CPUID");
1880 return retval;
1881 }
1882
1883 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1884 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
1885 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1886 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
1887 if (retval != ERROR_OK) {
1888 LOG_DEBUG("Examine %s failed", "Memory Model Type");
1889 return retval;
1890 }
1891 ttypr |= tmp1;
1892 ttypr = (ttypr << 32) | tmp0;
1893
1894 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1895 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
1896 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1897 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
1898 if (retval != ERROR_OK) {
1899 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
1900 return retval;
1901 }
1902 debug |= tmp1;
1903 debug = (debug << 32) | tmp0;
1904
1905 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1906 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
1907 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
1908
1909 if (target->ctibase == 0) {
1910 /* assume a v8 rom table layout */
1911 armv8->cti_base = target->ctibase = armv8->debug_base + 0x10000;
1912 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, target->ctibase);
1913 } else
1914 armv8->cti_base = target->ctibase;
1915
1916 armv8->arm.core_type = ARM_MODE_MON;
1917 retval = aarch64_dpm_setup(aarch64, debug);
1918 if (retval != ERROR_OK)
1919 return retval;
1920
1921 /* Setup Breakpoint Register Pairs */
1922 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
1923 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
1924 aarch64->brp_num_available = aarch64->brp_num;
1925 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
1926 for (i = 0; i < aarch64->brp_num; i++) {
1927 aarch64->brp_list[i].used = 0;
1928 if (i < (aarch64->brp_num-aarch64->brp_num_context))
1929 aarch64->brp_list[i].type = BRP_NORMAL;
1930 else
1931 aarch64->brp_list[i].type = BRP_CONTEXT;
1932 aarch64->brp_list[i].value = 0;
1933 aarch64->brp_list[i].control = 0;
1934 aarch64->brp_list[i].BRPn = i;
1935 }
1936
1937 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
1938
1939 target_set_examined(target);
1940 return ERROR_OK;
1941 }
1942
1943 static int aarch64_examine(struct target *target)
1944 {
1945 int retval = ERROR_OK;
1946
1947 /* don't re-probe hardware after each reset */
1948 if (!target_was_examined(target))
1949 retval = aarch64_examine_first(target);
1950
1951 /* Configure core debug access */
1952 if (retval == ERROR_OK)
1953 retval = aarch64_init_debug_access(target);
1954
1955 return retval;
1956 }
1957
1958 /*
1959 * Cortex-A8 target creation and initialization
1960 */
1961
/*
 * Target init hook.  All real probing and setup is deferred to
 * aarch64_examine_first(), so there is nothing to do here.
 */
static int aarch64_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
1968
1969 static int aarch64_init_arch_info(struct target *target,
1970 struct aarch64_common *aarch64, struct jtag_tap *tap)
1971 {
1972 struct armv8_common *armv8 = &aarch64->armv8_common;
1973 struct adiv5_dap *dap = armv8->arm.dap;
1974
1975 armv8->arm.dap = dap;
1976
1977 /* Setup struct aarch64_common */
1978 aarch64->common_magic = AARCH64_COMMON_MAGIC;
1979 /* tap has no dap initialized */
1980 if (!tap->dap) {
1981 tap->dap = dap_init();
1982
1983 /* Leave (only) generic DAP stuff for debugport_init() */
1984 tap->dap->tap = tap;
1985 }
1986
1987 armv8->arm.dap = tap->dap;
1988
1989 aarch64->fast_reg_read = 0;
1990
1991 /* register arch-specific functions */
1992 armv8->examine_debug_reason = NULL;
1993
1994 armv8->post_debug_entry = aarch64_post_debug_entry;
1995
1996 armv8->pre_restore_context = NULL;
1997
1998 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
1999
2000 /* REVISIT v7a setup should be in a v7a-specific routine */
2001 armv8_init_arch_info(target, armv8);
2002 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2003
2004 return ERROR_OK;
2005 }
2006
2007 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2008 {
2009 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2010
2011 return aarch64_init_arch_info(target, aarch64, target->tap);
2012 }
2013
2014 static int aarch64_mmu(struct target *target, int *enabled)
2015 {
2016 if (target->state != TARGET_HALTED) {
2017 LOG_ERROR("%s: target not halted", __func__);
2018 return ERROR_TARGET_INVALID;
2019 }
2020
2021 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2022 return ERROR_OK;
2023 }
2024
2025 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2026 target_addr_t *phys)
2027 {
2028 return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2029 }
2030
2031 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2032 {
2033 struct target *target = get_current_target(CMD_CTX);
2034 struct armv8_common *armv8 = target_to_armv8(target);
2035
2036 return armv8_handle_cache_info_command(CMD_CTX,
2037 &armv8->armv8_mmu.armv8_cache);
2038 }
2039
2040
2041 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2042 {
2043 struct target *target = get_current_target(CMD_CTX);
2044 if (!target_was_examined(target)) {
2045 LOG_ERROR("target not examined yet");
2046 return ERROR_FAIL;
2047 }
2048
2049 return aarch64_init_debug_access(target);
2050 }
2051 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2052 {
2053 struct target *target = get_current_target(CMD_CTX);
2054 /* check target is an smp target */
2055 struct target_list *head;
2056 struct target *curr;
2057 head = target->head;
2058 target->smp = 0;
2059 if (head != (struct target_list *)NULL) {
2060 while (head != (struct target_list *)NULL) {
2061 curr = head->target;
2062 curr->smp = 0;
2063 head = head->next;
2064 }
2065 /* fixes the target display to the debugger */
2066 target->gdb_service->target = target;
2067 }
2068 return ERROR_OK;
2069 }
2070
2071 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2072 {
2073 struct target *target = get_current_target(CMD_CTX);
2074 struct target_list *head;
2075 struct target *curr;
2076 head = target->head;
2077 if (head != (struct target_list *)NULL) {
2078 target->smp = 1;
2079 while (head != (struct target_list *)NULL) {
2080 curr = head->target;
2081 curr->smp = 1;
2082 head = head->next;
2083 }
2084 }
2085 return ERROR_OK;
2086 }
2087
2088 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2089 {
2090 struct target *target = get_current_target(CMD_CTX);
2091 int retval = ERROR_OK;
2092 struct target_list *head;
2093 head = target->head;
2094 if (head != (struct target_list *)NULL) {
2095 if (CMD_ARGC == 1) {
2096 int coreid = 0;
2097 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2098 if (ERROR_OK != retval)
2099 return retval;
2100 target->gdb_service->core[1] = coreid;
2101
2102 }
2103 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2104 , target->gdb_service->core[1]);
2105 }
2106 return ERROR_OK;
2107 }
2108
2109 static const struct command_registration aarch64_exec_command_handlers[] = {
2110 {
2111 .name = "cache_info",
2112 .handler = aarch64_handle_cache_info_command,
2113 .mode = COMMAND_EXEC,
2114 .help = "display information about target caches",
2115 .usage = "",
2116 },
2117 {
2118 .name = "dbginit",
2119 .handler = aarch64_handle_dbginit_command,
2120 .mode = COMMAND_EXEC,
2121 .help = "Initialize core debug",
2122 .usage = "",
2123 },
2124 { .name = "smp_off",
2125 .handler = aarch64_handle_smp_off_command,
2126 .mode = COMMAND_EXEC,
2127 .help = "Stop smp handling",
2128 .usage = "",
2129 },
2130 {
2131 .name = "smp_on",
2132 .handler = aarch64_handle_smp_on_command,
2133 .mode = COMMAND_EXEC,
2134 .help = "Restart smp handling",
2135 .usage = "",
2136 },
2137 {
2138 .name = "smp_gdb",
2139 .handler = aarch64_handle_smp_gdb_command,
2140 .mode = COMMAND_EXEC,
2141 .help = "display/fix current core played to gdb",
2142 .usage = "",
2143 },
2144
2145
2146 COMMAND_REGISTRATION_DONE
2147 };
/* Top-level command registration: generic arm and armv8 command groups,
 * plus the aarch64-specific commands. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv8_command_handlers,
	},
	{
		/* NOTE(review): the group is still named "cortex_a" even though
		 * this is the aarch64 driver — presumably kept from the cortex_a
		 * code this file derives from; renaming would break user scripts. */
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2164
/* target_type vtable for the "aarch64" target. */
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	/* run control */
	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	/* virtual-address memory access */
	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	/* generic arm helpers are reused for checksum/blank-check */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* breakpoints; watchpoint support is not implemented */
	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	/* physical-address memory access, bypassing the MMU */
	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)