aarch64: reduce debug output to improve legibility
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
33 static int aarch64_poll(struct target *target);
34 static int aarch64_debug_entry(struct target *target);
35 static int aarch64_restore_context(struct target *target, bool bpwp);
36 static int aarch64_set_breakpoint(struct target *target,
37 struct breakpoint *breakpoint, uint8_t matchmode);
38 static int aarch64_set_context_breakpoint(struct target *target,
39 struct breakpoint *breakpoint, uint8_t matchmode);
40 static int aarch64_set_hybrid_breakpoint(struct target *target,
41 struct breakpoint *breakpoint);
42 static int aarch64_unset_breakpoint(struct target *target,
43 struct breakpoint *breakpoint);
44 static int aarch64_mmu(struct target *target, int *enabled);
45 static int aarch64_virt2phys(struct target *target,
46 target_addr_t virt, target_addr_t *phys);
47 static int aarch64_read_apb_ap_memory(struct target *target,
48 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
49
/*
 * Write the cached SCTLR value back to the core if it differs from the
 * value currently programmed there. Uses CPU register r0/x0 via the DPM,
 * so it must run before the register context is restored.
 */
static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		/* pick the SCTLR_ELx write opcode matching the core's current
		 * exception level / mode */
		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			/* SCTLR_EL1 is not accessible from EL0: temporarily
			 * switch to EL1h for the write */
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
			/* AArch32 modes: SCTLR lives in cp15 c1 */
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			LOG_INFO("cannot read system control register in this mode");
			return ERROR_FAIL;
		}

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		/* switch back to the mode the core was halted in */
		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}
105
/* Check the address before an aarch64 APB read/write access with the MMU on,
 * to avoid a predictable APB data abort */
108 static int aarch64_check_address(struct target *target, uint32_t address)
109 {
110 /* TODO */
111 return ERROR_OK;
112 }
113 /* modify system_control_reg in order to enable or disable mmu for :
114 * - virt2phys address conversion
115 * - read or write memory in phys or virt address */
116 static int aarch64_mmu_modify(struct target *target, int enable)
117 {
118 struct aarch64_common *aarch64 = target_to_aarch64(target);
119 struct armv8_common *armv8 = &aarch64->armv8_common;
120 int retval = ERROR_OK;
121 uint32_t instr = 0;
122
123 if (enable) {
124 /* if mmu enabled at target stop and mmu not enable */
125 if (!(aarch64->system_control_reg & 0x1U)) {
126 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
127 return ERROR_FAIL;
128 }
129 if (!(aarch64->system_control_reg_curr & 0x1U))
130 aarch64->system_control_reg_curr |= 0x1U;
131 } else {
132 if (aarch64->system_control_reg_curr & 0x4U) {
133 /* data cache is active */
134 aarch64->system_control_reg_curr &= ~0x4U;
135 /* flush data cache armv8 function to be called */
136 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
137 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
138 }
139 if ((aarch64->system_control_reg_curr & 0x1U)) {
140 aarch64->system_control_reg_curr &= ~0x1U;
141 }
142 }
143
144 switch (armv8->arm.core_mode) {
145 case ARMV8_64_EL0T:
146 case ARMV8_64_EL1T:
147 case ARMV8_64_EL1H:
148 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
149 break;
150 case ARMV8_64_EL2T:
151 case ARMV8_64_EL2H:
152 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
153 break;
154 case ARMV8_64_EL3H:
155 case ARMV8_64_EL3T:
156 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
157 break;
158 default:
159 LOG_DEBUG("unknown cpu state 0x%x" PRIx32, armv8->arm.core_state);
160 break;
161 }
162
163 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
164 aarch64->system_control_reg_curr);
165 return retval;
166 }
167
168 /*
169 * Basic debug access, very low level assumes state is saved
170 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain (reading PRSR clears it) */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_CTR, 1);
	/* By default, gate all channel triggers to and from the CTM */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, 0);
	/* output halt requests to PE on channel 0 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return aarch64_poll(target);
}
216
217 /* Write to memory mapped registers directly with no cache or mmu handling */
218 static int aarch64_dap_write_memap_register_u32(struct target *target,
219 uint32_t address,
220 uint32_t value)
221 {
222 int retval;
223 struct armv8_common *armv8 = target_to_armv8(target);
224
225 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
226
227 return retval;
228 }
229
230 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
231 {
232 struct arm_dpm *dpm = &a8->armv8_common.dpm;
233 int retval;
234
235 dpm->arm = &a8->armv8_common.arm;
236 dpm->didr = debug;
237
238 retval = armv8_dpm_setup(dpm);
239 if (retval == ERROR_OK)
240 retval = armv8_dpm_initialize(dpm);
241
242 return retval;
243 }
244
245 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
246 {
247 struct armv8_common *armv8 = target_to_armv8(target);
248 uint32_t dscr;
249
250 /* Read DSCR */
251 int retval = mem_ap_read_atomic_u32(armv8->debug_ap,
252 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
253 if (ERROR_OK != retval)
254 return retval;
255
256 /* clear bitfield */
257 dscr &= ~bit_mask;
258 /* put new value */
259 dscr |= value & bit_mask;
260
261 /* write new DSCR */
262 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
263 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
264 return retval;
265 }
266
267 static struct target *get_aarch64(struct target *target, int32_t coreid)
268 {
269 struct target_list *head;
270 struct target *curr;
271
272 head = target->head;
273 while (head != (struct target_list *)NULL) {
274 curr = head->target;
275 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
276 return curr;
277 head = head->next;
278 }
279 return target;
280 }
281 static int aarch64_halt(struct target *target);
282
/*
 * Halt all cores of an SMP group together: open the CTI channel 0 gate on
 * every core so a single halt event propagates through the CTM, then halt
 * the requested PE.
 */
static int aarch64_halt_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head = target->head;

	while (head != (struct target_list *)NULL) {
		struct target *curr = head->target;
		struct armv8_common *armv8 = target_to_armv8(curr);

		/* open the gate for channel 0 to let HALT requests pass to the CTM */
		if (curr->smp) {
			retval = mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->cti_base + CTI_GATE, CTI_CHNL(0));
			/* halting debug mode must be enabled for the halt to take */
			if (retval == ERROR_OK)
				retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
		}
		if (retval != ERROR_OK)
			break;

		head = head->next;
	}

	/* halt the target PE; the opened gates spread the request to all cores */
	if (retval == ERROR_OK)
		retval = aarch64_halt(target);

	return retval;
}
311
312 static int update_halt_gdb(struct target *target)
313 {
314 int retval = 0;
315 if (target->gdb_service && target->gdb_service->core[0] == -1) {
316 target->gdb_service->target = target;
317 target->gdb_service->core[0] = target->coreid;
318 retval += aarch64_halt_smp(target);
319 }
320 return retval;
321 }
322
/*
 * AArch64 run control
 */
326
/*
 * Poll the core's DSCR and update target->state, firing halt events and
 * performing debug entry when a halting debug event is detected.
 */
static int aarch64_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_aarch64(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache DSCR for aarch64_debug_entry() */
	aarch64->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == 0x3) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target %s halted", target_name(target));
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				/* halt the rest of the SMP group if gdb has no core yet */
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				/* debug-running halts report the DEBUG_HALTED event */
				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
392
/*
 * Halt the core by pulsing CTI channel 0 (wired to the PE's halt request in
 * aarch64_init_debug_access), then wait up to 1 s for DSCR to report halted.
 */
static int aarch64_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv8_common *armv8 = target_to_armv8(target);

	/*
	 * add HDE in halting debug mode
	 */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_APPPULSE, CTI_CHNL(0));
	if (retval != ERROR_OK)
		return retval;

	/* poll DSCR until the halt is confirmed, with a 1 s timeout */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCRV8_HALT_MASK) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
430
/*
 * Prepare the core for resuming: fix up and write back the PC, restore the
 * system control register and the dirty register context. Does not actually
 * restart execution (see aarch64_internal_restart).
 */
static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		/* A32 instructions are word aligned */
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		/* A64 instructions are word aligned (64-bit PC) */
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	/* mark PC dirty so it gets written back with the context below */
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
483
/*
 * Restart execution on the core via a CTI channel 1 pulse. When slave_pe is
 * true, only the CTI gate is opened so the core restarts together with the
 * primary PE's pulse; the caller-visible target state is set to RUNNING and
 * the register caches are invalidated.
 */
static int aarch64_internal_restart(struct target *target, bool slave_pe)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* sanity-check DSCR before leaving debug state */
	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* make sure to acknowledge the halt event before resuming */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_INACK, CTI_TRIG(HALT));

	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs
	 */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	if (!slave_pe) {
		/* trigger an event on channel 1, generates a restart request to the PE */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_APPPULSE, CTI_CHNL(1));
		if (retval != ERROR_OK)
			return retval;

		/* wait up to 1 s for the core to leave debug state */
		long long then = timeval_ms();
		for (;; ) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			if (retval != ERROR_OK)
				return retval;
			if ((dscr & DSCR_HDE) != 0)
				break;
			if (timeval_ms() > then + 1000) {
				LOG_ERROR("Timeout waiting for resume");
				return ERROR_FAIL;
			}
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);
	register_cache_invalidate(arm->core_cache->next);

	return ERROR_OK;
}
553
554 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
555 {
556 int retval = 0;
557 struct target_list *head;
558 struct target *curr;
559 uint64_t address;
560 head = target->head;
561 while (head != (struct target_list *)NULL) {
562 curr = head->target;
563 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
564 /* resume current address , not in step mode */
565 retval += aarch64_internal_restore(curr, 1, &address,
566 handle_breakpoints, 0);
567 retval += aarch64_internal_restart(curr, true);
568 }
569 head = head->next;
570
571 }
572 return retval;
573 }
574
575 static int aarch64_resume(struct target *target, int current,
576 target_addr_t address, int handle_breakpoints, int debug_execution)
577 {
578 int retval = 0;
579 uint64_t addr = address;
580
581 /* dummy resume for smp toggle in order to reduce gdb impact */
582 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
583 /* simulate a start and halt of target */
584 target->gdb_service->target = NULL;
585 target->gdb_service->core[0] = target->gdb_service->core[1];
586 /* fake resume at next poll we play the target core[1], see poll*/
587 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
588 return 0;
589 }
590
591 if (target->state != TARGET_HALTED)
592 return ERROR_TARGET_NOT_HALTED;
593
594 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
595 debug_execution);
596 if (target->smp) {
597 target->gdb_service->core[0] = -1;
598 retval = aarch64_restore_smp(target, handle_breakpoints);
599 if (retval != ERROR_OK)
600 return retval;
601 }
602 aarch64_internal_restart(target, false);
603
604 if (!debug_execution) {
605 target->state = TARGET_RUNNING;
606 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
607 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
608 } else {
609 target->state = TARGET_DEBUG_RUNNING;
610 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
611 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
612 }
613
614 return ERROR_OK;
615 }
616
/*
 * Enter debug state after a halt: configure opcode/register access for the
 * detected core state, clear sticky errors, examine the debug reason and
 * read the current register context.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), aarch64->cpudbg_dscr);

	/* use the DSCR value cached by aarch64_poll() */
	dpm->dscr = aarch64->cpudbg_dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);

	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);

	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, aarch64->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		/* WFAR is 64 bits wide, read it as two 32-bit halves */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR1,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
674
/*
 * After debug entry: read the system control register (SCTLR) for the
 * current exception level, cache it, and derive the MMU/cache enable flags
 * used by the memory access code.
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	/* pick the SCTLR_ELx read opcode matching the core's current mode */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* SCTLR_EL1 is not accessible from EL0: temporarily switch
		 * to EL1h for the read */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
		/* AArch32 modes: SCTLR lives in cp15 c1 */
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_INFO("cannot read system control register in this mode");
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* identify caches and read MPIDR only once (info == -1 means unset) */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	/* SCTLR bit 0: MMU enable, bit 2: D-cache enable, bit 12: I-cache enable */
	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
		(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
		(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	aarch64->curr_mode = armv8->arm.core_mode;
	return ERROR_OK;
}
739
740 static int aarch64_step(struct target *target, int current, target_addr_t address,
741 int handle_breakpoints)
742 {
743 struct armv8_common *armv8 = target_to_armv8(target);
744 int retval;
745 uint32_t edecr;
746
747 if (target->state != TARGET_HALTED) {
748 LOG_WARNING("target not halted");
749 return ERROR_TARGET_NOT_HALTED;
750 }
751
752 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
753 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
754 if (retval != ERROR_OK)
755 return retval;
756
757 /* make sure EDECR.SS is not set when restoring the register */
758 edecr &= ~0x4;
759
760 /* set EDECR.SS to enter hardware step mode */
761 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
762 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
763 if (retval != ERROR_OK)
764 return retval;
765
766 /* disable interrupts while stepping */
767 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
768 if (retval != ERROR_OK)
769 return ERROR_OK;
770
771 /* resume the target */
772 retval = aarch64_resume(target, current, address, 0, 0);
773 if (retval != ERROR_OK)
774 return retval;
775
776 long long then = timeval_ms();
777 while (target->state != TARGET_HALTED) {
778 retval = aarch64_poll(target);
779 if (retval != ERROR_OK)
780 return retval;
781 if (timeval_ms() > then + 1000) {
782 LOG_ERROR("timeout waiting for target halt");
783 return ERROR_FAIL;
784 }
785 }
786
787 /* restore EDECR */
788 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
789 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
790 if (retval != ERROR_OK)
791 return retval;
792
793 /* restore interrupts */
794 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
795 if (retval != ERROR_OK)
796 return ERROR_OK;
797
798 return ERROR_OK;
799 }
800
801 static int aarch64_restore_context(struct target *target, bool bpwp)
802 {
803 struct armv8_common *armv8 = target_to_armv8(target);
804
805 LOG_DEBUG("%s", target_name(target));
806
807 if (armv8->pre_restore_context)
808 armv8->pre_restore_context(target);
809
810 return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
811 }
812
/*
 * AArch64 breakpoint and watchpoint functions
 */
816
817 /* Setup hardware Breakpoint Register Pair */
818 static int aarch64_set_breakpoint(struct target *target,
819 struct breakpoint *breakpoint, uint8_t matchmode)
820 {
821 int retval;
822 int brp_i = 0;
823 uint32_t control;
824 uint8_t byte_addr_select = 0x0F;
825 struct aarch64_common *aarch64 = target_to_aarch64(target);
826 struct armv8_common *armv8 = &aarch64->armv8_common;
827 struct aarch64_brp *brp_list = aarch64->brp_list;
828
829 if (breakpoint->set) {
830 LOG_WARNING("breakpoint already set");
831 return ERROR_OK;
832 }
833
834 if (breakpoint->type == BKPT_HARD) {
835 int64_t bpt_value;
836 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
837 brp_i++;
838 if (brp_i >= aarch64->brp_num) {
839 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
840 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
841 }
842 breakpoint->set = brp_i + 1;
843 if (breakpoint->length == 2)
844 byte_addr_select = (3 << (breakpoint->address & 0x02));
845 control = ((matchmode & 0x7) << 20)
846 | (1 << 13)
847 | (byte_addr_select << 5)
848 | (3 << 1) | 1;
849 brp_list[brp_i].used = 1;
850 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
851 brp_list[brp_i].control = control;
852 bpt_value = brp_list[brp_i].value;
853
854 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
855 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
856 (uint32_t)(bpt_value & 0xFFFFFFFF));
857 if (retval != ERROR_OK)
858 return retval;
859 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
860 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
861 (uint32_t)(bpt_value >> 32));
862 if (retval != ERROR_OK)
863 return retval;
864
865 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
866 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
867 brp_list[brp_i].control);
868 if (retval != ERROR_OK)
869 return retval;
870 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
871 brp_list[brp_i].control,
872 brp_list[brp_i].value);
873
874 } else if (breakpoint->type == BKPT_SOFT) {
875 uint8_t code[4];
876
877 buf_set_u32(code, 0, 32, armv8_opcode(armv8, ARMV8_OPC_HLT));
878 retval = target_read_memory(target,
879 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
880 breakpoint->length, 1,
881 breakpoint->orig_instr);
882 if (retval != ERROR_OK)
883 return retval;
884
885 armv8_cache_d_inner_flush_virt(armv8,
886 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
887 breakpoint->length);
888
889 retval = target_write_memory(target,
890 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
891 breakpoint->length, 1, code);
892 if (retval != ERROR_OK)
893 return retval;
894
895 armv8_cache_d_inner_flush_virt(armv8,
896 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
897 breakpoint->length);
898
899 armv8_cache_i_inner_inval_virt(armv8,
900 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
901 breakpoint->length);
902
903 breakpoint->set = 0x11; /* Any nice value but 0 */
904 }
905
906 /* Ensure that halting debug mode is enable */
907 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
908 if (retval != ERROR_OK) {
909 LOG_DEBUG("Failed to set DSCR.HDE");
910 return retval;
911 }
912
913 return ERROR_OK;
914 }
915
916 static int aarch64_set_context_breakpoint(struct target *target,
917 struct breakpoint *breakpoint, uint8_t matchmode)
918 {
919 int retval = ERROR_FAIL;
920 int brp_i = 0;
921 uint32_t control;
922 uint8_t byte_addr_select = 0x0F;
923 struct aarch64_common *aarch64 = target_to_aarch64(target);
924 struct armv8_common *armv8 = &aarch64->armv8_common;
925 struct aarch64_brp *brp_list = aarch64->brp_list;
926
927 if (breakpoint->set) {
928 LOG_WARNING("breakpoint already set");
929 return retval;
930 }
931 /*check available context BRPs*/
932 while ((brp_list[brp_i].used ||
933 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
934 brp_i++;
935
936 if (brp_i >= aarch64->brp_num) {
937 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
938 return ERROR_FAIL;
939 }
940
941 breakpoint->set = brp_i + 1;
942 control = ((matchmode & 0x7) << 20)
943 | (1 << 13)
944 | (byte_addr_select << 5)
945 | (3 << 1) | 1;
946 brp_list[brp_i].used = 1;
947 brp_list[brp_i].value = (breakpoint->asid);
948 brp_list[brp_i].control = control;
949 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
950 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
951 brp_list[brp_i].value);
952 if (retval != ERROR_OK)
953 return retval;
954 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
955 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
956 brp_list[brp_i].control);
957 if (retval != ERROR_OK)
958 return retval;
959 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
960 brp_list[brp_i].control,
961 brp_list[brp_i].value);
962 return ERROR_OK;
963
964 }
965
966 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
967 {
968 int retval = ERROR_FAIL;
969 int brp_1 = 0; /* holds the contextID pair */
970 int brp_2 = 0; /* holds the IVA pair */
971 uint32_t control_CTX, control_IVA;
972 uint8_t CTX_byte_addr_select = 0x0F;
973 uint8_t IVA_byte_addr_select = 0x0F;
974 uint8_t CTX_machmode = 0x03;
975 uint8_t IVA_machmode = 0x01;
976 struct aarch64_common *aarch64 = target_to_aarch64(target);
977 struct armv8_common *armv8 = &aarch64->armv8_common;
978 struct aarch64_brp *brp_list = aarch64->brp_list;
979
980 if (breakpoint->set) {
981 LOG_WARNING("breakpoint already set");
982 return retval;
983 }
984 /*check available context BRPs*/
985 while ((brp_list[brp_1].used ||
986 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
987 brp_1++;
988
989 printf("brp(CTX) found num: %d\n", brp_1);
990 if (brp_1 >= aarch64->brp_num) {
991 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
992 return ERROR_FAIL;
993 }
994
995 while ((brp_list[brp_2].used ||
996 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
997 brp_2++;
998
999 printf("brp(IVA) found num: %d\n", brp_2);
1000 if (brp_2 >= aarch64->brp_num) {
1001 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1002 return ERROR_FAIL;
1003 }
1004
1005 breakpoint->set = brp_1 + 1;
1006 breakpoint->linked_BRP = brp_2;
1007 control_CTX = ((CTX_machmode & 0x7) << 20)
1008 | (brp_2 << 16)
1009 | (0 << 14)
1010 | (CTX_byte_addr_select << 5)
1011 | (3 << 1) | 1;
1012 brp_list[brp_1].used = 1;
1013 brp_list[brp_1].value = (breakpoint->asid);
1014 brp_list[brp_1].control = control_CTX;
1015 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1016 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1017 brp_list[brp_1].value);
1018 if (retval != ERROR_OK)
1019 return retval;
1020 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1021 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1022 brp_list[brp_1].control);
1023 if (retval != ERROR_OK)
1024 return retval;
1025
1026 control_IVA = ((IVA_machmode & 0x7) << 20)
1027 | (brp_1 << 16)
1028 | (1 << 13)
1029 | (IVA_byte_addr_select << 5)
1030 | (3 << 1) | 1;
1031 brp_list[brp_2].used = 1;
1032 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1033 brp_list[brp_2].control = control_IVA;
1034 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1035 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1036 brp_list[brp_2].value & 0xFFFFFFFF);
1037 if (retval != ERROR_OK)
1038 return retval;
1039 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1040 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1041 brp_list[brp_2].value >> 32);
1042 if (retval != ERROR_OK)
1043 return retval;
1044 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1045 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1046 brp_list[brp_2].control);
1047 if (retval != ERROR_OK)
1048 return retval;
1049
1050 return ERROR_OK;
1051 }
1052
/*
 * Remove a previously set breakpoint from the core.
 *
 * BKPT_HARD: the breakpoint register pair recorded in breakpoint->set
 * (minus one) is cleared and written back to the debug registers; a hybrid
 * breakpoint (both address and asid non-zero) additionally releases the
 * linked pair recorded in breakpoint->linked_BRP.
 * Soft breakpoints: the saved original instruction is written back over
 * the breakpoint opcode and the data/instruction caches are maintained
 * around the patched address.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			/* hybrid breakpoint: free both the context pair (brp_i)
			 * and the linked IVA pair (brp_j) */
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* disable the pair first (BCR), then clear the value register */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			/* value is zero at this point, so the truncated cast also
			 * clears the upper word of the 64-bit BVR */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: single IVA pair */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		/* flush D-cache so the write hits memory coherently */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		/* invalidate I-cache so the core fetches the restored opcode */
		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1185
1186 static int aarch64_add_breakpoint(struct target *target,
1187 struct breakpoint *breakpoint)
1188 {
1189 struct aarch64_common *aarch64 = target_to_aarch64(target);
1190
1191 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1192 LOG_INFO("no hardware breakpoint available");
1193 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1194 }
1195
1196 if (breakpoint->type == BKPT_HARD)
1197 aarch64->brp_num_available--;
1198
1199 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1200 }
1201
1202 static int aarch64_add_context_breakpoint(struct target *target,
1203 struct breakpoint *breakpoint)
1204 {
1205 struct aarch64_common *aarch64 = target_to_aarch64(target);
1206
1207 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1208 LOG_INFO("no hardware breakpoint available");
1209 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1210 }
1211
1212 if (breakpoint->type == BKPT_HARD)
1213 aarch64->brp_num_available--;
1214
1215 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1216 }
1217
1218 static int aarch64_add_hybrid_breakpoint(struct target *target,
1219 struct breakpoint *breakpoint)
1220 {
1221 struct aarch64_common *aarch64 = target_to_aarch64(target);
1222
1223 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1224 LOG_INFO("no hardware breakpoint available");
1225 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1226 }
1227
1228 if (breakpoint->type == BKPT_HARD)
1229 aarch64->brp_num_available--;
1230
1231 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1232 }
1233
1234
1235 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1236 {
1237 struct aarch64_common *aarch64 = target_to_aarch64(target);
1238
1239 #if 0
1240 /* It is perfectly possible to remove breakpoints while the target is running */
1241 if (target->state != TARGET_HALTED) {
1242 LOG_WARNING("target not halted");
1243 return ERROR_TARGET_NOT_HALTED;
1244 }
1245 #endif
1246
1247 if (breakpoint->set) {
1248 aarch64_unset_breakpoint(target, breakpoint);
1249 if (breakpoint->type == BKPT_HARD)
1250 aarch64->brp_num_available++;
1251 }
1252
1253 return ERROR_OK;
1254 }
1255
/*
 * AArch64 reset functions
 */
1259
1260 static int aarch64_assert_reset(struct target *target)
1261 {
1262 struct armv8_common *armv8 = target_to_armv8(target);
1263
1264 LOG_DEBUG(" ");
1265
1266 /* FIXME when halt is requested, make it work somehow... */
1267
1268 /* Issue some kind of warm reset. */
1269 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1270 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1271 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1272 /* REVISIT handle "pulls" cases, if there's
1273 * hardware that needs them to work.
1274 */
1275 jtag_add_reset(0, 1);
1276 } else {
1277 LOG_ERROR("%s: how to reset?", target_name(target));
1278 return ERROR_FAIL;
1279 }
1280
1281 /* registers are now invalid */
1282 if (target_was_examined(target))
1283 register_cache_invalidate(armv8->arm.core_cache);
1284
1285 target->state = TARGET_RESET;
1286
1287 return ERROR_OK;
1288 }
1289
1290 static int aarch64_deassert_reset(struct target *target)
1291 {
1292 int retval;
1293
1294 LOG_DEBUG(" ");
1295
1296 /* be certain SRST is off */
1297 jtag_add_reset(0, 0);
1298
1299 if (!target_was_examined(target))
1300 return ERROR_OK;
1301
1302 retval = aarch64_poll(target);
1303 if (retval != ERROR_OK)
1304 return retval;
1305
1306 if (target->reset_halt) {
1307 if (target->state != TARGET_HALTED) {
1308 LOG_WARNING("%s: ran after reset and before halt ...",
1309 target_name(target));
1310 retval = target_halt(target);
1311 if (retval != ERROR_OK)
1312 return retval;
1313 }
1314 }
1315
1316 return ERROR_OK;
1317 }
1318
1319 static int aarch64_write_apb_ap_memory(struct target *target,
1320 uint64_t address, uint32_t size,
1321 uint32_t count, const uint8_t *buffer)
1322 {
1323 /* write memory through APB-AP */
1324 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1325 struct armv8_common *armv8 = target_to_armv8(target);
1326 struct arm_dpm *dpm = &armv8->dpm;
1327 struct arm *arm = &armv8->arm;
1328 int total_bytes = count * size;
1329 int total_u32;
1330 int start_byte = address & 0x3;
1331 int end_byte = (address + total_bytes) & 0x3;
1332 struct reg *reg;
1333 uint32_t dscr;
1334 uint8_t *tmp_buff = NULL;
1335
1336 if (target->state != TARGET_HALTED) {
1337 LOG_WARNING("target not halted");
1338 return ERROR_TARGET_NOT_HALTED;
1339 }
1340
1341 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1342
1343 /* Mark register R0 as dirty, as it will be used
1344 * for transferring the data.
1345 * It will be restored automatically when exiting
1346 * debug mode
1347 */
1348 reg = armv8_reg_current(arm, 1);
1349 reg->dirty = true;
1350
1351 reg = armv8_reg_current(arm, 0);
1352 reg->dirty = true;
1353
1354 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1355
1356 /* The algorithm only copies 32 bit words, so the buffer
1357 * should be expanded to include the words at either end.
1358 * The first and last words will be read first to avoid
1359 * corruption if needed.
1360 */
1361 tmp_buff = malloc(total_u32 * 4);
1362
1363 if ((start_byte != 0) && (total_u32 > 1)) {
1364 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1365 * the other bytes in the word.
1366 */
1367 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1368 if (retval != ERROR_OK)
1369 goto error_free_buff_w;
1370 }
1371
1372 /* If end of write is not aligned, or the write is less than 4 bytes */
1373 if ((end_byte != 0) ||
1374 ((total_u32 == 1) && (total_bytes != 4))) {
1375
1376 /* Read the last word to avoid corruption during 32 bit write */
1377 int mem_offset = (total_u32-1) * 4;
1378 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1379 if (retval != ERROR_OK)
1380 goto error_free_buff_w;
1381 }
1382
1383 /* Copy the write buffer over the top of the temporary buffer */
1384 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1385
1386 /* We now have a 32 bit aligned buffer that can be written */
1387
1388 /* Read DSCR */
1389 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1390 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1391 if (retval != ERROR_OK)
1392 goto error_free_buff_w;
1393
1394 /* Set Normal access mode */
1395 dscr = (dscr & ~DSCR_MA);
1396 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1397 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1398
1399 if (arm->core_state == ARM_STATE_AARCH64) {
1400 /* Write X0 with value 'address' using write procedure */
1401 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1402 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1403 retval = dpm->instr_write_data_dcc_64(dpm,
1404 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
1405 } else {
1406 /* Write R0 with value 'address' using write procedure */
1407 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1408 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1409 dpm->instr_write_data_dcc(dpm,
1410 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
1411
1412 }
1413 /* Step 1.d - Change DCC to memory mode */
1414 dscr = dscr | DSCR_MA;
1415 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1416 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1417 if (retval != ERROR_OK)
1418 goto error_unset_dtr_w;
1419
1420
1421 /* Step 2.a - Do the write */
1422 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1423 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1424 if (retval != ERROR_OK)
1425 goto error_unset_dtr_w;
1426
1427 /* Step 3.a - Switch DTR mode back to Normal mode */
1428 dscr = (dscr & ~DSCR_MA);
1429 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1430 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1431 if (retval != ERROR_OK)
1432 goto error_unset_dtr_w;
1433
1434 /* Check for sticky abort flags in the DSCR */
1435 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1436 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1437 if (retval != ERROR_OK)
1438 goto error_free_buff_w;
1439
1440 dpm->dscr = dscr;
1441 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1442 /* Abort occurred - clear it and exit */
1443 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1444 armv8_dpm_handle_exception(dpm);
1445 goto error_free_buff_w;
1446 }
1447
1448 /* Done */
1449 free(tmp_buff);
1450 return ERROR_OK;
1451
1452 error_unset_dtr_w:
1453 /* Unset DTR mode */
1454 mem_ap_read_atomic_u32(armv8->debug_ap,
1455 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1456 dscr = (dscr & ~DSCR_MA);
1457 mem_ap_write_atomic_u32(armv8->debug_ap,
1458 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1459 error_free_buff_w:
1460 LOG_ERROR("error");
1461 free(tmp_buff);
1462 return ERROR_FAIL;
1463 }
1464
/*
 * Read target memory through the APB-AP using the DCC in memory access
 * mode (algorithm from ARM DDI0487A.g, chapter J9.1).
 *
 * The DCC only transfers whole 32-bit words, so for unaligned requests the
 * data is staged through a temporary buffer and the requested byte range
 * is copied out at the end.  X0/X1 (R0/R1) are used by the sequence and
 * marked dirty so they are restored when leaving debug state.
 *
 * Returns ERROR_OK on success, ERROR_TARGET_NOT_HALTED if the core is
 * running, ERROR_FAIL on debug-access or allocation failure.
 */
static int aarch64_read_apb_ap_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int total_bytes = count * size;
	int total_u32;
	int start_byte = address & 0x3;
	int end_byte = (address + total_bytes) & 0x3;
	struct reg *reg;
	uint32_t dscr;
	uint8_t *tmp_buff = NULL;
	uint8_t *u8buf_ptr;
	uint32_t value;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* number of whole words covering the (possibly unaligned) range */
	total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
	/* Mark register X0, X1 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	reg = armv8_reg_current(arm, 1);
	reg->dirty = true;

	reg = armv8_reg_current(arm, 0);
	reg->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval += mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval += dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval += dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);

	}
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Optimize the read as much as we can, either way we read in a single pass */
	if ((start_byte) || (end_byte)) {
		/* The algorithm only copies 32 bit words, so the buffer
		 * should be expanded to include the words at either end.
		 * The first and last words will be read into a temp buffer
		 * to avoid corruption
		 */
		tmp_buff = malloc(total_u32 * 4);
		if (!tmp_buff)
			goto error_unset_dtr_r;

		/* use the tmp buffer to read the entire data */
		u8buf_ptr = tmp_buff;
	} else
		/* address and read length are aligned so read directly into the passed buffer */
		u8buf_ptr = buffer;

	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
	 * increments X0 by 4. */
	retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
									armv8->debug_base + CPUV8_DBG_DTRTX);
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Step 3.a - set DTR access mode back to Normal mode */
	dscr = (dscr & ~DSCR_MA);
	retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm);
		goto error_free_buff_r;
	}

	/* check if we need to copy aligned data by applying any shift necessary */
	if (tmp_buff) {
		memcpy(buffer, tmp_buff + start_byte, total_bytes);
		free(tmp_buff);
	}

	/* Done */
	return ERROR_OK;

error_unset_dtr_r:
	/* Unset DTR mode */
	mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_MA);
	mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
error_free_buff_r:
	LOG_ERROR("error");
	free(tmp_buff);
	return ERROR_FAIL;
}
1625
1626 static int aarch64_read_phys_memory(struct target *target,
1627 target_addr_t address, uint32_t size,
1628 uint32_t count, uint8_t *buffer)
1629 {
1630 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1631
1632 if (count && buffer) {
1633 /* read memory through APB-AP */
1634 retval = aarch64_mmu_modify(target, 0);
1635 if (retval != ERROR_OK)
1636 return retval;
1637 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1638 }
1639 return retval;
1640 }
1641
1642 static int aarch64_read_memory(struct target *target, target_addr_t address,
1643 uint32_t size, uint32_t count, uint8_t *buffer)
1644 {
1645 int mmu_enabled = 0;
1646 int retval;
1647
1648 /* determine if MMU was enabled on target stop */
1649 retval = aarch64_mmu(target, &mmu_enabled);
1650 if (retval != ERROR_OK)
1651 return retval;
1652
1653 if (mmu_enabled) {
1654 retval = aarch64_check_address(target, address);
1655 if (retval != ERROR_OK)
1656 return retval;
1657 /* enable MMU as we could have disabled it for phys access */
1658 retval = aarch64_mmu_modify(target, 1);
1659 if (retval != ERROR_OK)
1660 return retval;
1661 }
1662 return aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1663 }
1664
1665 static int aarch64_write_phys_memory(struct target *target,
1666 target_addr_t address, uint32_t size,
1667 uint32_t count, const uint8_t *buffer)
1668 {
1669 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1670
1671 if (count && buffer) {
1672 /* write memory through APB-AP */
1673 retval = aarch64_mmu_modify(target, 0);
1674 if (retval != ERROR_OK)
1675 return retval;
1676 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1677 }
1678
1679 return retval;
1680 }
1681
1682 static int aarch64_write_memory(struct target *target, target_addr_t address,
1683 uint32_t size, uint32_t count, const uint8_t *buffer)
1684 {
1685 int mmu_enabled = 0;
1686 int retval;
1687
1688 /* determine if MMU was enabled on target stop */
1689 retval = aarch64_mmu(target, &mmu_enabled);
1690 if (retval != ERROR_OK)
1691 return retval;
1692
1693 if (mmu_enabled) {
1694 retval = aarch64_check_address(target, address);
1695 if (retval != ERROR_OK)
1696 return retval;
1697 /* enable MMU as we could have disabled it for phys access */
1698 retval = aarch64_mmu_modify(target, 1);
1699 if (retval != ERROR_OK)
1700 return retval;
1701 }
1702 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1703 }
1704
1705 static int aarch64_handle_target_request(void *priv)
1706 {
1707 struct target *target = priv;
1708 struct armv8_common *armv8 = target_to_armv8(target);
1709 int retval;
1710
1711 if (!target_was_examined(target))
1712 return ERROR_OK;
1713 if (!target->dbg_msg_enabled)
1714 return ERROR_OK;
1715
1716 if (target->state == TARGET_RUNNING) {
1717 uint32_t request;
1718 uint32_t dscr;
1719 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1720 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1721
1722 /* check if we have data */
1723 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
1724 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1725 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
1726 if (retval == ERROR_OK) {
1727 target_request(target, request);
1728 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1729 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1730 }
1731 }
1732 }
1733
1734 return ERROR_OK;
1735 }
1736
1737 static int aarch64_examine_first(struct target *target)
1738 {
1739 struct aarch64_common *aarch64 = target_to_aarch64(target);
1740 struct armv8_common *armv8 = &aarch64->armv8_common;
1741 struct adiv5_dap *swjdp = armv8->arm.dap;
1742 int i;
1743 int retval = ERROR_OK;
1744 uint64_t debug, ttypr;
1745 uint32_t cpuid;
1746 uint32_t tmp0, tmp1;
1747 debug = ttypr = cpuid = 0;
1748
1749 /* We do one extra read to ensure DAP is configured,
1750 * we call ahbap_debugport_init(swjdp) instead
1751 */
1752 retval = dap_dp_init(swjdp);
1753 if (retval != ERROR_OK)
1754 return retval;
1755
1756 /* Search for the APB-AB - it is needed for access to debug registers */
1757 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
1758 if (retval != ERROR_OK) {
1759 LOG_ERROR("Could not find APB-AP for debug access");
1760 return retval;
1761 }
1762
1763 retval = mem_ap_init(armv8->debug_ap);
1764 if (retval != ERROR_OK) {
1765 LOG_ERROR("Could not initialize the APB-AP");
1766 return retval;
1767 }
1768
1769 armv8->debug_ap->memaccess_tck = 80;
1770
1771 if (!target->dbgbase_set) {
1772 uint32_t dbgbase;
1773 /* Get ROM Table base */
1774 uint32_t apid;
1775 int32_t coreidx = target->coreid;
1776 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
1777 if (retval != ERROR_OK)
1778 return retval;
1779 /* Lookup 0x15 -- Processor DAP */
1780 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
1781 &armv8->debug_base, &coreidx);
1782 if (retval != ERROR_OK)
1783 return retval;
1784 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
1785 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
1786 } else
1787 armv8->debug_base = target->dbgbase;
1788
1789 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1790 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
1791 if (retval != ERROR_OK) {
1792 LOG_DEBUG("LOCK debug access fail");
1793 return retval;
1794 }
1795
1796 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1797 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
1798 if (retval != ERROR_OK) {
1799 LOG_DEBUG("Examine %s failed", "oslock");
1800 return retval;
1801 }
1802
1803 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1804 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
1805 if (retval != ERROR_OK) {
1806 LOG_DEBUG("Examine %s failed", "CPUID");
1807 return retval;
1808 }
1809
1810 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1811 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
1812 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1813 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
1814 if (retval != ERROR_OK) {
1815 LOG_DEBUG("Examine %s failed", "Memory Model Type");
1816 return retval;
1817 }
1818 ttypr |= tmp1;
1819 ttypr = (ttypr << 32) | tmp0;
1820
1821 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1822 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
1823 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1824 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
1825 if (retval != ERROR_OK) {
1826 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
1827 return retval;
1828 }
1829 debug |= tmp1;
1830 debug = (debug << 32) | tmp0;
1831
1832 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1833 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
1834 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
1835
1836 if (target->ctibase == 0) {
1837 /* assume a v8 rom table layout */
1838 armv8->cti_base = target->ctibase = armv8->debug_base + 0x10000;
1839 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, target->ctibase);
1840 } else
1841 armv8->cti_base = target->ctibase;
1842
1843 armv8->arm.core_type = ARM_MODE_MON;
1844 retval = aarch64_dpm_setup(aarch64, debug);
1845 if (retval != ERROR_OK)
1846 return retval;
1847
1848 /* Setup Breakpoint Register Pairs */
1849 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
1850 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
1851 aarch64->brp_num_available = aarch64->brp_num;
1852 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
1853 for (i = 0; i < aarch64->brp_num; i++) {
1854 aarch64->brp_list[i].used = 0;
1855 if (i < (aarch64->brp_num-aarch64->brp_num_context))
1856 aarch64->brp_list[i].type = BRP_NORMAL;
1857 else
1858 aarch64->brp_list[i].type = BRP_CONTEXT;
1859 aarch64->brp_list[i].value = 0;
1860 aarch64->brp_list[i].control = 0;
1861 aarch64->brp_list[i].BRPn = i;
1862 }
1863
1864 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
1865
1866 target_set_examined(target);
1867 return ERROR_OK;
1868 }
1869
1870 static int aarch64_examine(struct target *target)
1871 {
1872 int retval = ERROR_OK;
1873
1874 /* don't re-probe hardware after each reset */
1875 if (!target_was_examined(target))
1876 retval = aarch64_examine_first(target);
1877
1878 /* Configure core debug access */
1879 if (retval == ERROR_OK)
1880 retval = aarch64_init_debug_access(target);
1881
1882 return retval;
1883 }
1884
/*
 * AArch64 target creation and initialization
 */
1888
/*
 * Target init hook; intentionally empty because the real setup is done
 * in aarch64_examine_first().
 */
static int aarch64_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
1895
1896 static int aarch64_init_arch_info(struct target *target,
1897 struct aarch64_common *aarch64, struct jtag_tap *tap)
1898 {
1899 struct armv8_common *armv8 = &aarch64->armv8_common;
1900 struct adiv5_dap *dap = armv8->arm.dap;
1901
1902 armv8->arm.dap = dap;
1903
1904 /* Setup struct aarch64_common */
1905 aarch64->common_magic = AARCH64_COMMON_MAGIC;
1906 /* tap has no dap initialized */
1907 if (!tap->dap) {
1908 tap->dap = dap_init();
1909
1910 /* Leave (only) generic DAP stuff for debugport_init() */
1911 tap->dap->tap = tap;
1912 }
1913
1914 armv8->arm.dap = tap->dap;
1915
1916 aarch64->fast_reg_read = 0;
1917
1918 /* register arch-specific functions */
1919 armv8->examine_debug_reason = NULL;
1920
1921 armv8->post_debug_entry = aarch64_post_debug_entry;
1922
1923 armv8->pre_restore_context = NULL;
1924
1925 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
1926
1927 /* REVISIT v7a setup should be in a v7a-specific routine */
1928 armv8_init_arch_info(target, armv8);
1929 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
1930
1931 return ERROR_OK;
1932 }
1933
1934 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
1935 {
1936 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
1937
1938 return aarch64_init_arch_info(target, aarch64, target->tap);
1939 }
1940
1941 static int aarch64_mmu(struct target *target, int *enabled)
1942 {
1943 if (target->state != TARGET_HALTED) {
1944 LOG_ERROR("%s: target not halted", __func__);
1945 return ERROR_TARGET_INVALID;
1946 }
1947
1948 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
1949 return ERROR_OK;
1950 }
1951
/*
 * Translate a virtual address to a physical one via the target's
 * translation tables.  The trailing argument (1) is forwarded to
 * armv8_mmu_translate_va_pa(); presumably it selects verbose/meminfo
 * behavior — confirm against that function's definition.
 */
static int aarch64_virt2phys(struct target *target, target_addr_t virt,
	target_addr_t *phys)
{
	return armv8_mmu_translate_va_pa(target, virt, phys, 1);
}
1957
1958 COMMAND_HANDLER(aarch64_handle_cache_info_command)
1959 {
1960 struct target *target = get_current_target(CMD_CTX);
1961 struct armv8_common *armv8 = target_to_armv8(target);
1962
1963 return armv8_handle_cache_info_command(CMD_CTX,
1964 &armv8->armv8_mmu.armv8_cache);
1965 }
1966
1967
1968 COMMAND_HANDLER(aarch64_handle_dbginit_command)
1969 {
1970 struct target *target = get_current_target(CMD_CTX);
1971 if (!target_was_examined(target)) {
1972 LOG_ERROR("target not examined yet");
1973 return ERROR_FAIL;
1974 }
1975
1976 return aarch64_init_debug_access(target);
1977 }
1978 COMMAND_HANDLER(aarch64_handle_smp_off_command)
1979 {
1980 struct target *target = get_current_target(CMD_CTX);
1981 /* check target is an smp target */
1982 struct target_list *head;
1983 struct target *curr;
1984 head = target->head;
1985 target->smp = 0;
1986 if (head != (struct target_list *)NULL) {
1987 while (head != (struct target_list *)NULL) {
1988 curr = head->target;
1989 curr->smp = 0;
1990 head = head->next;
1991 }
1992 /* fixes the target display to the debugger */
1993 target->gdb_service->target = target;
1994 }
1995 return ERROR_OK;
1996 }
1997
1998 COMMAND_HANDLER(aarch64_handle_smp_on_command)
1999 {
2000 struct target *target = get_current_target(CMD_CTX);
2001 struct target_list *head;
2002 struct target *curr;
2003 head = target->head;
2004 if (head != (struct target_list *)NULL) {
2005 target->smp = 1;
2006 while (head != (struct target_list *)NULL) {
2007 curr = head->target;
2008 curr->smp = 1;
2009 head = head->next;
2010 }
2011 }
2012 return ERROR_OK;
2013 }
2014
2015 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2016 {
2017 struct target *target = get_current_target(CMD_CTX);
2018 int retval = ERROR_OK;
2019 struct target_list *head;
2020 head = target->head;
2021 if (head != (struct target_list *)NULL) {
2022 if (CMD_ARGC == 1) {
2023 int coreid = 0;
2024 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2025 if (ERROR_OK != retval)
2026 return retval;
2027 target->gdb_service->core[1] = coreid;
2028
2029 }
2030 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2031 , target->gdb_service->core[1]);
2032 }
2033 return ERROR_OK;
2034 }
2035
/* Sub-commands registered under the driver's command group: cache
 * inspection, debug (re)initialization, and SMP control. Terminated by
 * COMMAND_REGISTRATION_DONE as the command framework requires. */
static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{	.name = "smp_off",
		.handler = aarch64_handle_smp_off_command,
		.mode = COMMAND_EXEC,
		.help = "Stop smp handling",
		.usage = "",
	},
	{
		.name = "smp_on",
		.handler = aarch64_handle_smp_on_command,
		.mode = COMMAND_EXEC,
		.help = "Restart smp handling",
		.usage = "",
	},
	{
		.name = "smp_gdb",
		.handler = aarch64_handle_smp_gdb_command,
		.mode = COMMAND_EXEC,
		.help = "display/fix current core played to gdb",
		.usage = "",
	},


	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration: chains the shared ARMv8 commands plus
 * this driver's own sub-commands under one group.
 * NOTE(review): the group is named "cortex_a" although this is the
 * aarch64 driver — presumably inherited from cortex_a.c when this file
 * was derived from it. Renaming it to "aarch64" would be cleaner but
 * changes the user-visible command name; confirm before changing. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = armv8_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2088
/* Target-type descriptor binding the "aarch64" target name to this
 * driver's entry points. Watchpoint support is not implemented
 * (add_watchpoint/remove_watchpoint are NULL). */
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	/* checksum/blank-check and algorithm execution reuse the generic
	 * ARM implementations rather than aarch64-specific ones */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, and then change the URL to https://review.openocd.org/login/?link to reach this page again; this time it will work for linking the new login method. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)