aarch64: clean up struct aarch64_common
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
/* Forward declarations: these helpers are referenced before their
 * definitions further down in this file. */
static int aarch64_poll(struct target *target);
static int aarch64_debug_entry(struct target *target);
static int aarch64_restore_context(struct target *target, bool bpwp);
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_mmu(struct target *target, int *enabled);
static int aarch64_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int aarch64_read_apb_ap_memory(struct target *target,
	uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
49
/*
 * Write the cached system control register (SCTLR) back to the core if it
 * differs from the value currently programmed there.  The write goes
 * through the DPM and clobbers r0, so callers must invoke this BEFORE
 * restoring the general purpose register context.
 *
 * Returns ERROR_OK on success (or if nothing needed writing), ERROR_FAIL
 * when the core is halted in a mode from which SCTLR is not writable.
 */
static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		/* pick the MSR/MCR opcode matching the exception level the
		 * core is halted in */
		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			/* SCTLR_EL1 is not accessible from EL0: switch to EL1h
			 * for the write, then back (see below) */
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		/* AArch32 modes use the CP15 coprocessor write instead */
		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			LOG_INFO("cannot read system control register in this mode");
			return ERROR_FAIL;
		}

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}
105
106 /* modify system_control_reg in order to enable or disable mmu for :
107 * - virt2phys address conversion
108 * - read or write memory in phys or virt address */
109 static int aarch64_mmu_modify(struct target *target, int enable)
110 {
111 struct aarch64_common *aarch64 = target_to_aarch64(target);
112 struct armv8_common *armv8 = &aarch64->armv8_common;
113 int retval = ERROR_OK;
114 uint32_t instr = 0;
115
116 if (enable) {
117 /* if mmu enabled at target stop and mmu not enable */
118 if (!(aarch64->system_control_reg & 0x1U)) {
119 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
120 return ERROR_FAIL;
121 }
122 if (!(aarch64->system_control_reg_curr & 0x1U))
123 aarch64->system_control_reg_curr |= 0x1U;
124 } else {
125 if (aarch64->system_control_reg_curr & 0x4U) {
126 /* data cache is active */
127 aarch64->system_control_reg_curr &= ~0x4U;
128 /* flush data cache armv8 function to be called */
129 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
130 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
131 }
132 if ((aarch64->system_control_reg_curr & 0x1U)) {
133 aarch64->system_control_reg_curr &= ~0x1U;
134 }
135 }
136
137 switch (armv8->arm.core_mode) {
138 case ARMV8_64_EL0T:
139 case ARMV8_64_EL1T:
140 case ARMV8_64_EL1H:
141 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
142 break;
143 case ARMV8_64_EL2T:
144 case ARMV8_64_EL2H:
145 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
146 break;
147 case ARMV8_64_EL3H:
148 case ARMV8_64_EL3T:
149 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
150 break;
151 default:
152 LOG_DEBUG("unknown cpu state 0x%x" PRIx32, armv8->arm.core_state);
153 break;
154 }
155
156 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
157 aarch64->system_control_reg_curr);
158 return retval;
159 }
160
/*
 * Basic debug access, very low level assumes state is saved
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* clear the OS lock so the external debugger regains access to the
	 * core debug registers */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_CTR, 1);
	/* By default, gate all channel triggers to and from the CTM */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, 0);
	/* output halt requests to PE on channel 0 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return aarch64_poll(target);
}
216
217 /* Write to memory mapped registers directly with no cache or mmu handling */
218 static int aarch64_dap_write_memap_register_u32(struct target *target,
219 uint32_t address,
220 uint32_t value)
221 {
222 int retval;
223 struct armv8_common *armv8 = target_to_armv8(target);
224
225 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
226
227 return retval;
228 }
229
230 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
231 {
232 struct arm_dpm *dpm = &a8->armv8_common.dpm;
233 int retval;
234
235 dpm->arm = &a8->armv8_common.arm;
236 dpm->didr = debug;
237
238 retval = armv8_dpm_setup(dpm);
239 if (retval == ERROR_OK)
240 retval = armv8_dpm_initialize(dpm);
241
242 return retval;
243 }
244
245 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
246 {
247 struct armv8_common *armv8 = target_to_armv8(target);
248 return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
249 }
250
251 static struct target *get_aarch64(struct target *target, int32_t coreid)
252 {
253 struct target_list *head;
254 struct target *curr;
255
256 head = target->head;
257 while (head != (struct target_list *)NULL) {
258 curr = head->target;
259 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
260 return curr;
261 head = head->next;
262 }
263 return target;
264 }
265 static int aarch64_halt(struct target *target);
266
267 static int aarch64_halt_smp(struct target *target)
268 {
269 int retval = ERROR_OK;
270 struct target_list *head = target->head;
271
272 while (head != (struct target_list *)NULL) {
273 struct target *curr = head->target;
274 struct armv8_common *armv8 = target_to_armv8(curr);
275
276 /* open the gate for channel 0 to let HALT requests pass to the CTM */
277 if (curr->smp) {
278 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
279 armv8->cti_base + CTI_GATE, CTI_CHNL(0));
280 if (retval == ERROR_OK)
281 retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
282 }
283 if (retval != ERROR_OK)
284 break;
285
286 head = head->next;
287 }
288
289 /* halt the target PE */
290 if (retval == ERROR_OK)
291 retval = aarch64_halt(target);
292
293 return retval;
294 }
295
296 static int update_halt_gdb(struct target *target)
297 {
298 int retval = 0;
299 if (target->gdb_service && target->gdb_service->core[0] == -1) {
300 target->gdb_service->target = target;
301 target->gdb_service->core[0] = target->coreid;
302 retval += aarch64_halt_smp(target);
303 }
304 return retval;
305 }
306
/*
 * Cortex-A8 Run control
 */

/*
 * Poll DSCR to track the core's run state; on a newly detected halt,
 * enter debug state, optionally halt the rest of the SMP group, and
 * notify registered event callbacks (gdb).
 */
static int aarch64_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_aarch64(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* run-mode field 0x3 indicates the core is halted in debug state */
	if (DSCR_RUN_MODE(dscr) == 0x3) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target %s halted", target_name(target));
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
375
376 static int aarch64_halt(struct target *target)
377 {
378 int retval = ERROR_OK;
379 uint32_t dscr;
380 struct armv8_common *armv8 = target_to_armv8(target);
381
382 /*
383 * add HDE in halting debug mode
384 */
385 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
386 if (retval != ERROR_OK)
387 return retval;
388
389 /* trigger an event on channel 0, this outputs a halt request to the PE */
390 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
391 armv8->cti_base + CTI_APPPULSE, CTI_CHNL(0));
392 if (retval != ERROR_OK)
393 return retval;
394
395 long long then = timeval_ms();
396 for (;; ) {
397 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
398 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
399 if (retval != ERROR_OK)
400 return retval;
401 if ((dscr & DSCRV8_HALT_MASK) != 0)
402 break;
403 if (timeval_ms() > then + 1000) {
404 LOG_ERROR("Timeout waiting for halt");
405 return ERROR_FAIL;
406 }
407 }
408
409 target->debug_reason = DBG_REASON_DBGRQ;
410
411 return ERROR_OK;
412 }
413
/*
 * Prepare the core's register context for resume: compute the resume PC,
 * write it into the cached PC register, restore SCTLR and flush all dirty
 * registers back to the core.  Does not actually restart the core - see
 * aarch64_internal_restart() for that.
 *
 * current != 0 : resume at the current PC, *address receives that PC
 * current == 0 : resume at *address
 */
static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		/* A32 instructions are word aligned */
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		/* A64 instructions are word aligned too */
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
466
/*
 * Restart a core whose context has already been restored.  Acknowledges
 * the pending halt event, opens the CTI gate on channel 1 and - unless
 * this is a slave PE in an SMP group - pulses channel 1 to generate the
 * restart request, then waits (1 s timeout) for the core to leave debug
 * state.  Slave PEs are restarted by the same channel-1 event propagated
 * through the CTM, so they skip the pulse and the wait.
 */
static int aarch64_internal_restart(struct target *target, bool slave_pe)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* make sure to acknowledge the halt event before resuming */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_INACK, CTI_TRIG(HALT));

	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs
	 */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	if (!slave_pe) {
		/* trigger an event on channel 1, generates a restart request to the PE */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_APPPULSE, CTI_CHNL(1));
		if (retval != ERROR_OK)
			return retval;

		long long then = timeval_ms();
		for (;; ) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			if (retval != ERROR_OK)
				return retval;
			/* NOTE(review): this polls DSCR_HDE, apparently as a
			 * "restarted" indication - confirm against the EDSCR
			 * RW/status field definition */
			if ((dscr & DSCR_HDE) != 0)
				break;
			if (timeval_ms() > then + 1000) {
				LOG_ERROR("Timeout waiting for resume");
				return ERROR_FAIL;
			}
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);
	register_cache_invalidate(arm->core_cache->next);

	return ERROR_OK;
}
536
537 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
538 {
539 int retval = 0;
540 struct target_list *head;
541 struct target *curr;
542 uint64_t address;
543 head = target->head;
544 while (head != (struct target_list *)NULL) {
545 curr = head->target;
546 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
547 /* resume current address , not in step mode */
548 retval += aarch64_internal_restore(curr, 1, &address,
549 handle_breakpoints, 0);
550 retval += aarch64_internal_restart(curr, true);
551 }
552 head = head->next;
553
554 }
555 return retval;
556 }
557
558 static int aarch64_resume(struct target *target, int current,
559 target_addr_t address, int handle_breakpoints, int debug_execution)
560 {
561 int retval = 0;
562 uint64_t addr = address;
563
564 /* dummy resume for smp toggle in order to reduce gdb impact */
565 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
566 /* simulate a start and halt of target */
567 target->gdb_service->target = NULL;
568 target->gdb_service->core[0] = target->gdb_service->core[1];
569 /* fake resume at next poll we play the target core[1], see poll*/
570 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
571 return 0;
572 }
573
574 if (target->state != TARGET_HALTED)
575 return ERROR_TARGET_NOT_HALTED;
576
577 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
578 debug_execution);
579 if (target->smp) {
580 target->gdb_service->core[0] = -1;
581 retval = aarch64_restore_smp(target, handle_breakpoints);
582 if (retval != ERROR_OK)
583 return retval;
584 }
585 aarch64_internal_restart(target, false);
586
587 if (!debug_execution) {
588 target->state = TARGET_RUNNING;
589 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
590 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
591 } else {
592 target->state = TARGET_DEBUG_RUNNING;
593 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
594 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
595 }
596
597 return ERROR_OK;
598 }
599
/*
 * Called once a halt has been detected: clear sticky errors, read DSCR,
 * select the AArch64/AArch32 opcode and register-access variants that
 * match the core's execution state, decode the debug reason, capture the
 * watchpoint fault address if applicable, and save the register context.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	uint32_t dscr;

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval == ERROR_OK)
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);

	dpm->dscr = dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);

	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		/* WFAR is 64 bits: read the high word first ... */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR1,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		/* ... then merge in the low word */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
663
/*
 * Post-debug-entry hook: read the system control register (SCTLR) for
 * the exception level the core halted in, cache it, identify the cache
 * geometry on first entry, and mirror the MMU/cache enable bits into the
 * armv8_mmu state used by the memory access routines.
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	/* pick the MRS/MRC opcode matching the current exception level
	 * (mirrors aarch64_restore_system_control_reg) */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* SCTLR_EL1 is not readable from EL0; switch to EL1h first */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_INFO("cannot read system control register in this mode");
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* info == -1 marks the cache description as not yet populated */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	/* SCTLR bit 0 = M (MMU), bit 2 = C (data cache), bit 12 = I (icache) */
	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
		(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
		(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	return ERROR_OK;
}
727
728 static int aarch64_step(struct target *target, int current, target_addr_t address,
729 int handle_breakpoints)
730 {
731 struct armv8_common *armv8 = target_to_armv8(target);
732 int retval;
733 uint32_t edecr;
734
735 if (target->state != TARGET_HALTED) {
736 LOG_WARNING("target not halted");
737 return ERROR_TARGET_NOT_HALTED;
738 }
739
740 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
741 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
742 if (retval != ERROR_OK)
743 return retval;
744
745 /* make sure EDECR.SS is not set when restoring the register */
746 edecr &= ~0x4;
747
748 /* set EDECR.SS to enter hardware step mode */
749 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
750 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
751 if (retval != ERROR_OK)
752 return retval;
753
754 /* disable interrupts while stepping */
755 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
756 if (retval != ERROR_OK)
757 return ERROR_OK;
758
759 /* resume the target */
760 retval = aarch64_resume(target, current, address, 0, 0);
761 if (retval != ERROR_OK)
762 return retval;
763
764 long long then = timeval_ms();
765 while (target->state != TARGET_HALTED) {
766 retval = aarch64_poll(target);
767 if (retval != ERROR_OK)
768 return retval;
769 if (timeval_ms() > then + 1000) {
770 LOG_ERROR("timeout waiting for target halt");
771 return ERROR_FAIL;
772 }
773 }
774
775 /* restore EDECR */
776 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
777 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
778 if (retval != ERROR_OK)
779 return retval;
780
781 /* restore interrupts */
782 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
783 if (retval != ERROR_OK)
784 return ERROR_OK;
785
786 return ERROR_OK;
787 }
788
789 static int aarch64_restore_context(struct target *target, bool bpwp)
790 {
791 struct armv8_common *armv8 = target_to_armv8(target);
792
793 LOG_DEBUG("%s", target_name(target));
794
795 if (armv8->pre_restore_context)
796 armv8->pre_restore_context(target);
797
798 return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
799 }
800
801 /*
802 * Cortex-A8 Breakpoint and watchpoint functions
803 */
804
805 /* Setup hardware Breakpoint Register Pair */
806 static int aarch64_set_breakpoint(struct target *target,
807 struct breakpoint *breakpoint, uint8_t matchmode)
808 {
809 int retval;
810 int brp_i = 0;
811 uint32_t control;
812 uint8_t byte_addr_select = 0x0F;
813 struct aarch64_common *aarch64 = target_to_aarch64(target);
814 struct armv8_common *armv8 = &aarch64->armv8_common;
815 struct aarch64_brp *brp_list = aarch64->brp_list;
816
817 if (breakpoint->set) {
818 LOG_WARNING("breakpoint already set");
819 return ERROR_OK;
820 }
821
822 if (breakpoint->type == BKPT_HARD) {
823 int64_t bpt_value;
824 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
825 brp_i++;
826 if (brp_i >= aarch64->brp_num) {
827 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
828 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
829 }
830 breakpoint->set = brp_i + 1;
831 if (breakpoint->length == 2)
832 byte_addr_select = (3 << (breakpoint->address & 0x02));
833 control = ((matchmode & 0x7) << 20)
834 | (1 << 13)
835 | (byte_addr_select << 5)
836 | (3 << 1) | 1;
837 brp_list[brp_i].used = 1;
838 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
839 brp_list[brp_i].control = control;
840 bpt_value = brp_list[brp_i].value;
841
842 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
843 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
844 (uint32_t)(bpt_value & 0xFFFFFFFF));
845 if (retval != ERROR_OK)
846 return retval;
847 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
848 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
849 (uint32_t)(bpt_value >> 32));
850 if (retval != ERROR_OK)
851 return retval;
852
853 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
854 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
855 brp_list[brp_i].control);
856 if (retval != ERROR_OK)
857 return retval;
858 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
859 brp_list[brp_i].control,
860 brp_list[brp_i].value);
861
862 } else if (breakpoint->type == BKPT_SOFT) {
863 uint8_t code[4];
864
865 buf_set_u32(code, 0, 32, armv8_opcode(armv8, ARMV8_OPC_HLT));
866 retval = target_read_memory(target,
867 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
868 breakpoint->length, 1,
869 breakpoint->orig_instr);
870 if (retval != ERROR_OK)
871 return retval;
872
873 armv8_cache_d_inner_flush_virt(armv8,
874 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
875 breakpoint->length);
876
877 retval = target_write_memory(target,
878 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
879 breakpoint->length, 1, code);
880 if (retval != ERROR_OK)
881 return retval;
882
883 armv8_cache_d_inner_flush_virt(armv8,
884 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
885 breakpoint->length);
886
887 armv8_cache_i_inner_inval_virt(armv8,
888 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
889 breakpoint->length);
890
891 breakpoint->set = 0x11; /* Any nice value but 0 */
892 }
893
894 /* Ensure that halting debug mode is enable */
895 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
896 if (retval != ERROR_OK) {
897 LOG_DEBUG("Failed to set DSCR.HDE");
898 return retval;
899 }
900
901 return ERROR_OK;
902 }
903
904 static int aarch64_set_context_breakpoint(struct target *target,
905 struct breakpoint *breakpoint, uint8_t matchmode)
906 {
907 int retval = ERROR_FAIL;
908 int brp_i = 0;
909 uint32_t control;
910 uint8_t byte_addr_select = 0x0F;
911 struct aarch64_common *aarch64 = target_to_aarch64(target);
912 struct armv8_common *armv8 = &aarch64->armv8_common;
913 struct aarch64_brp *brp_list = aarch64->brp_list;
914
915 if (breakpoint->set) {
916 LOG_WARNING("breakpoint already set");
917 return retval;
918 }
919 /*check available context BRPs*/
920 while ((brp_list[brp_i].used ||
921 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
922 brp_i++;
923
924 if (brp_i >= aarch64->brp_num) {
925 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
926 return ERROR_FAIL;
927 }
928
929 breakpoint->set = brp_i + 1;
930 control = ((matchmode & 0x7) << 20)
931 | (1 << 13)
932 | (byte_addr_select << 5)
933 | (3 << 1) | 1;
934 brp_list[brp_i].used = 1;
935 brp_list[brp_i].value = (breakpoint->asid);
936 brp_list[brp_i].control = control;
937 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
938 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
939 brp_list[brp_i].value);
940 if (retval != ERROR_OK)
941 return retval;
942 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
943 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
944 brp_list[brp_i].control);
945 if (retval != ERROR_OK)
946 return retval;
947 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
948 brp_list[brp_i].control,
949 brp_list[brp_i].value);
950 return ERROR_OK;
951
952 }
953
954 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
955 {
956 int retval = ERROR_FAIL;
957 int brp_1 = 0; /* holds the contextID pair */
958 int brp_2 = 0; /* holds the IVA pair */
959 uint32_t control_CTX, control_IVA;
960 uint8_t CTX_byte_addr_select = 0x0F;
961 uint8_t IVA_byte_addr_select = 0x0F;
962 uint8_t CTX_machmode = 0x03;
963 uint8_t IVA_machmode = 0x01;
964 struct aarch64_common *aarch64 = target_to_aarch64(target);
965 struct armv8_common *armv8 = &aarch64->armv8_common;
966 struct aarch64_brp *brp_list = aarch64->brp_list;
967
968 if (breakpoint->set) {
969 LOG_WARNING("breakpoint already set");
970 return retval;
971 }
972 /*check available context BRPs*/
973 while ((brp_list[brp_1].used ||
974 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
975 brp_1++;
976
977 printf("brp(CTX) found num: %d\n", brp_1);
978 if (brp_1 >= aarch64->brp_num) {
979 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
980 return ERROR_FAIL;
981 }
982
983 while ((brp_list[brp_2].used ||
984 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
985 brp_2++;
986
987 printf("brp(IVA) found num: %d\n", brp_2);
988 if (brp_2 >= aarch64->brp_num) {
989 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
990 return ERROR_FAIL;
991 }
992
993 breakpoint->set = brp_1 + 1;
994 breakpoint->linked_BRP = brp_2;
995 control_CTX = ((CTX_machmode & 0x7) << 20)
996 | (brp_2 << 16)
997 | (0 << 14)
998 | (CTX_byte_addr_select << 5)
999 | (3 << 1) | 1;
1000 brp_list[brp_1].used = 1;
1001 brp_list[brp_1].value = (breakpoint->asid);
1002 brp_list[brp_1].control = control_CTX;
1003 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1004 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1005 brp_list[brp_1].value);
1006 if (retval != ERROR_OK)
1007 return retval;
1008 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1009 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1010 brp_list[brp_1].control);
1011 if (retval != ERROR_OK)
1012 return retval;
1013
1014 control_IVA = ((IVA_machmode & 0x7) << 20)
1015 | (brp_1 << 16)
1016 | (1 << 13)
1017 | (IVA_byte_addr_select << 5)
1018 | (3 << 1) | 1;
1019 brp_list[brp_2].used = 1;
1020 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1021 brp_list[brp_2].control = control_IVA;
1022 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1023 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1024 brp_list[brp_2].value & 0xFFFFFFFF);
1025 if (retval != ERROR_OK)
1026 return retval;
1027 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1028 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1029 brp_list[brp_2].value >> 32);
1030 if (retval != ERROR_OK)
1031 return retval;
1032 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1033 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1034 brp_list[brp_2].control);
1035 if (retval != ERROR_OK)
1036 return retval;
1037
1038 return ERROR_OK;
1039 }
1040
/*
 * Remove a previously programmed breakpoint.
 *
 * For BKPT_HARD breakpoints the matching breakpoint register pair(s)
 * are disabled by clearing BCR and both halves of the 64-bit BVR.
 * A hybrid breakpoint (address and ASID both non-zero) releases two
 * linked BRPs: breakpoint->set holds the context BRP index + 1 and
 * breakpoint->linked_BRP the linked IVA BRP index.
 *
 * For soft breakpoints the saved original instruction is written back
 * and the data/instruction caches are maintained so the core fetches
 * the restored opcode.
 *
 * Inconsistent state (breakpoint not set, stale BRP index) is logged
 * and reported as ERROR_OK, making removal effectively best-effort.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			/* Hybrid breakpoint: release the context BRP first,
			 * then the linked IVA BRP. */
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* Disable the BRP (control = 0), then clear both the
			 * low and high word of the value register. */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* Plain hardware breakpoint: single BRP, index is
			 * breakpoint->set - 1. */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		/* Bit 0 is masked off the address; NOTE(review): looks like a
		 * leftover from the ARMv7 Thumb handling — confirm. */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* Flush D-cache and invalidate I-cache so the core fetches
		 * the restored instruction, not the stale breakpoint. */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1173
1174 static int aarch64_add_breakpoint(struct target *target,
1175 struct breakpoint *breakpoint)
1176 {
1177 struct aarch64_common *aarch64 = target_to_aarch64(target);
1178
1179 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1180 LOG_INFO("no hardware breakpoint available");
1181 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1182 }
1183
1184 if (breakpoint->type == BKPT_HARD)
1185 aarch64->brp_num_available--;
1186
1187 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1188 }
1189
1190 static int aarch64_add_context_breakpoint(struct target *target,
1191 struct breakpoint *breakpoint)
1192 {
1193 struct aarch64_common *aarch64 = target_to_aarch64(target);
1194
1195 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1196 LOG_INFO("no hardware breakpoint available");
1197 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1198 }
1199
1200 if (breakpoint->type == BKPT_HARD)
1201 aarch64->brp_num_available--;
1202
1203 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1204 }
1205
1206 static int aarch64_add_hybrid_breakpoint(struct target *target,
1207 struct breakpoint *breakpoint)
1208 {
1209 struct aarch64_common *aarch64 = target_to_aarch64(target);
1210
1211 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1212 LOG_INFO("no hardware breakpoint available");
1213 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1214 }
1215
1216 if (breakpoint->type == BKPT_HARD)
1217 aarch64->brp_num_available--;
1218
1219 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1220 }
1221
1222
1223 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1224 {
1225 struct aarch64_common *aarch64 = target_to_aarch64(target);
1226
1227 #if 0
1228 /* It is perfectly possible to remove breakpoints while the target is running */
1229 if (target->state != TARGET_HALTED) {
1230 LOG_WARNING("target not halted");
1231 return ERROR_TARGET_NOT_HALTED;
1232 }
1233 #endif
1234
1235 if (breakpoint->set) {
1236 aarch64_unset_breakpoint(target, breakpoint);
1237 if (breakpoint->type == BKPT_HARD)
1238 aarch64->brp_num_available++;
1239 }
1240
1241 return ERROR_OK;
1242 }
1243
1244 /*
1245 * Cortex-A8 Reset functions
1246 */
1247
1248 static int aarch64_assert_reset(struct target *target)
1249 {
1250 struct armv8_common *armv8 = target_to_armv8(target);
1251
1252 LOG_DEBUG(" ");
1253
1254 /* FIXME when halt is requested, make it work somehow... */
1255
1256 /* Issue some kind of warm reset. */
1257 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1258 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1259 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1260 /* REVISIT handle "pulls" cases, if there's
1261 * hardware that needs them to work.
1262 */
1263 jtag_add_reset(0, 1);
1264 } else {
1265 LOG_ERROR("%s: how to reset?", target_name(target));
1266 return ERROR_FAIL;
1267 }
1268
1269 /* registers are now invalid */
1270 if (target_was_examined(target)) {
1271 register_cache_invalidate(armv8->arm.core_cache);
1272 register_cache_invalidate(armv8->arm.core_cache->next);
1273 }
1274
1275 target->state = TARGET_RESET;
1276
1277 return ERROR_OK;
1278 }
1279
1280 static int aarch64_deassert_reset(struct target *target)
1281 {
1282 int retval;
1283
1284 LOG_DEBUG(" ");
1285
1286 /* be certain SRST is off */
1287 jtag_add_reset(0, 0);
1288
1289 if (!target_was_examined(target))
1290 return ERROR_OK;
1291
1292 retval = aarch64_poll(target);
1293 if (retval != ERROR_OK)
1294 return retval;
1295
1296 if (target->reset_halt) {
1297 if (target->state != TARGET_HALTED) {
1298 LOG_WARNING("%s: ran after reset and before halt ...",
1299 target_name(target));
1300 retval = target_halt(target);
1301 if (retval != ERROR_OK)
1302 return retval;
1303 }
1304 }
1305
1306 return aarch64_init_debug_access(target);
1307 }
1308
1309 static int aarch64_write_apb_ap_memory(struct target *target,
1310 uint64_t address, uint32_t size,
1311 uint32_t count, const uint8_t *buffer)
1312 {
1313 /* write memory through APB-AP */
1314 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1315 struct armv8_common *armv8 = target_to_armv8(target);
1316 struct arm_dpm *dpm = &armv8->dpm;
1317 struct arm *arm = &armv8->arm;
1318 int total_bytes = count * size;
1319 int total_u32;
1320 int start_byte = address & 0x3;
1321 int end_byte = (address + total_bytes) & 0x3;
1322 struct reg *reg;
1323 uint32_t dscr;
1324 uint8_t *tmp_buff = NULL;
1325
1326 if (target->state != TARGET_HALTED) {
1327 LOG_WARNING("target not halted");
1328 return ERROR_TARGET_NOT_HALTED;
1329 }
1330
1331 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1332
1333 /* Mark register R0 as dirty, as it will be used
1334 * for transferring the data.
1335 * It will be restored automatically when exiting
1336 * debug mode
1337 */
1338 reg = armv8_reg_current(arm, 1);
1339 reg->dirty = true;
1340
1341 reg = armv8_reg_current(arm, 0);
1342 reg->dirty = true;
1343
1344 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1345
1346 /* The algorithm only copies 32 bit words, so the buffer
1347 * should be expanded to include the words at either end.
1348 * The first and last words will be read first to avoid
1349 * corruption if needed.
1350 */
1351 tmp_buff = malloc(total_u32 * 4);
1352
1353 if ((start_byte != 0) && (total_u32 > 1)) {
1354 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1355 * the other bytes in the word.
1356 */
1357 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1358 if (retval != ERROR_OK)
1359 goto error_free_buff_w;
1360 }
1361
1362 /* If end of write is not aligned, or the write is less than 4 bytes */
1363 if ((end_byte != 0) ||
1364 ((total_u32 == 1) && (total_bytes != 4))) {
1365
1366 /* Read the last word to avoid corruption during 32 bit write */
1367 int mem_offset = (total_u32-1) * 4;
1368 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1369 if (retval != ERROR_OK)
1370 goto error_free_buff_w;
1371 }
1372
1373 /* Copy the write buffer over the top of the temporary buffer */
1374 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1375
1376 /* We now have a 32 bit aligned buffer that can be written */
1377
1378 /* Read DSCR */
1379 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1380 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1381 if (retval != ERROR_OK)
1382 goto error_free_buff_w;
1383
1384 /* Set Normal access mode */
1385 dscr = (dscr & ~DSCR_MA);
1386 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1387 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1388
1389 if (arm->core_state == ARM_STATE_AARCH64) {
1390 /* Write X0 with value 'address' using write procedure */
1391 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1392 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1393 retval = dpm->instr_write_data_dcc_64(dpm,
1394 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
1395 } else {
1396 /* Write R0 with value 'address' using write procedure */
1397 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1398 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1399 dpm->instr_write_data_dcc(dpm,
1400 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
1401
1402 }
1403 /* Step 1.d - Change DCC to memory mode */
1404 dscr = dscr | DSCR_MA;
1405 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1406 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1407 if (retval != ERROR_OK)
1408 goto error_unset_dtr_w;
1409
1410
1411 /* Step 2.a - Do the write */
1412 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1413 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1414 if (retval != ERROR_OK)
1415 goto error_unset_dtr_w;
1416
1417 /* Step 3.a - Switch DTR mode back to Normal mode */
1418 dscr = (dscr & ~DSCR_MA);
1419 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1420 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1421 if (retval != ERROR_OK)
1422 goto error_unset_dtr_w;
1423
1424 /* Check for sticky abort flags in the DSCR */
1425 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1426 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1427 if (retval != ERROR_OK)
1428 goto error_free_buff_w;
1429
1430 dpm->dscr = dscr;
1431 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1432 /* Abort occurred - clear it and exit */
1433 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1434 armv8_dpm_handle_exception(dpm);
1435 goto error_free_buff_w;
1436 }
1437
1438 /* Done */
1439 free(tmp_buff);
1440 return ERROR_OK;
1441
1442 error_unset_dtr_w:
1443 /* Unset DTR mode */
1444 mem_ap_read_atomic_u32(armv8->debug_ap,
1445 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1446 dscr = (dscr & ~DSCR_MA);
1447 mem_ap_write_atomic_u32(armv8->debug_ap,
1448 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1449 error_free_buff_w:
1450 LOG_ERROR("error");
1451 free(tmp_buff);
1452 return ERROR_FAIL;
1453 }
1454
/*
 * Read target memory through the APB-AP using the DCC in memory-access
 * mode (algorithm from ARM DDI0487A.g, chapter J9.1).
 *
 * The transfer works in whole 32-bit words: when address or length are
 * unaligned, everything is read into a temporary buffer and the caller's
 * bytes are copied out at the end; otherwise the data lands directly in
 * 'buffer'. Clobbers X0/X1 on the target (marked dirty so they are
 * restored when leaving debug state). The target must be halted.
 */
static int aarch64_read_apb_ap_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int total_bytes = count * size;
	int total_u32;
	int start_byte = address & 0x3;
	int end_byte = (address + total_bytes) & 0x3;
	struct reg *reg;
	uint32_t dscr;
	uint8_t *tmp_buff = NULL;
	uint8_t *u8buf_ptr;
	uint32_t value;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
	/* Mark register X0, X1 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	reg = armv8_reg_current(arm, 1);
	reg->dirty = true;

	reg = armv8_reg_current(arm, 0);
	reg->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval += mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval += dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval += dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);

	}
	/* retval accumulated error codes from the whole setup sequence;
	 * any failure above takes the error path here. */
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Optimize the read as much as we can, either way we read in a single pass */
	if ((start_byte) || (end_byte)) {
		/* The algorithm only copies 32 bit words, so the buffer
		 * should be expanded to include the words at either end.
		 * The first and last words will be read into a temp buffer
		 * to avoid corruption
		 */
		tmp_buff = malloc(total_u32 * 4);
		if (!tmp_buff)
			goto error_unset_dtr_r;

		/* use the tmp buffer to read the entire data */
		u8buf_ptr = tmp_buff;
	} else
		/* address and read length are aligned so read directly into the passed buffer */
		u8buf_ptr = buffer;

	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
	 * increments X0 by 4. */
	retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
									armv8->debug_base + CPUV8_DBG_DTRTX);
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Step 3.a - set DTR access mode back to Normal mode */
	dscr = (dscr & ~DSCR_MA);
	retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm);
		goto error_free_buff_r;
	}

	/* check if we need to copy aligned data by applying any shift necessary */
	if (tmp_buff) {
		memcpy(buffer, tmp_buff + start_byte, total_bytes);
		free(tmp_buff);
	}

	/* Done */
	return ERROR_OK;

error_unset_dtr_r:
	/* Unset DTR mode */
	mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_MA);
	mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
error_free_buff_r:
	LOG_ERROR("error");
	/* free(NULL) is a no-op, so this is safe on the early paths too */
	free(tmp_buff);
	return ERROR_FAIL;
}
1615
1616 static int aarch64_read_phys_memory(struct target *target,
1617 target_addr_t address, uint32_t size,
1618 uint32_t count, uint8_t *buffer)
1619 {
1620 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1621
1622 if (count && buffer) {
1623 /* read memory through APB-AP */
1624 retval = aarch64_mmu_modify(target, 0);
1625 if (retval != ERROR_OK)
1626 return retval;
1627 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1628 }
1629 return retval;
1630 }
1631
1632 static int aarch64_read_memory(struct target *target, target_addr_t address,
1633 uint32_t size, uint32_t count, uint8_t *buffer)
1634 {
1635 int mmu_enabled = 0;
1636 int retval;
1637
1638 /* determine if MMU was enabled on target stop */
1639 retval = aarch64_mmu(target, &mmu_enabled);
1640 if (retval != ERROR_OK)
1641 return retval;
1642
1643 if (mmu_enabled) {
1644 /* enable MMU as we could have disabled it for phys access */
1645 retval = aarch64_mmu_modify(target, 1);
1646 if (retval != ERROR_OK)
1647 return retval;
1648 }
1649 return aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1650 }
1651
1652 static int aarch64_write_phys_memory(struct target *target,
1653 target_addr_t address, uint32_t size,
1654 uint32_t count, const uint8_t *buffer)
1655 {
1656 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1657
1658 if (count && buffer) {
1659 /* write memory through APB-AP */
1660 retval = aarch64_mmu_modify(target, 0);
1661 if (retval != ERROR_OK)
1662 return retval;
1663 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1664 }
1665
1666 return retval;
1667 }
1668
1669 static int aarch64_write_memory(struct target *target, target_addr_t address,
1670 uint32_t size, uint32_t count, const uint8_t *buffer)
1671 {
1672 int mmu_enabled = 0;
1673 int retval;
1674
1675 /* determine if MMU was enabled on target stop */
1676 retval = aarch64_mmu(target, &mmu_enabled);
1677 if (retval != ERROR_OK)
1678 return retval;
1679
1680 if (mmu_enabled) {
1681 /* enable MMU as we could have disabled it for phys access */
1682 retval = aarch64_mmu_modify(target, 1);
1683 if (retval != ERROR_OK)
1684 return retval;
1685 }
1686 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1687 }
1688
1689 static int aarch64_handle_target_request(void *priv)
1690 {
1691 struct target *target = priv;
1692 struct armv8_common *armv8 = target_to_armv8(target);
1693 int retval;
1694
1695 if (!target_was_examined(target))
1696 return ERROR_OK;
1697 if (!target->dbg_msg_enabled)
1698 return ERROR_OK;
1699
1700 if (target->state == TARGET_RUNNING) {
1701 uint32_t request;
1702 uint32_t dscr;
1703 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1704 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1705
1706 /* check if we have data */
1707 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
1708 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1709 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
1710 if (retval == ERROR_OK) {
1711 target_request(target, request);
1712 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1713 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1714 }
1715 }
1716 }
1717
1718 return ERROR_OK;
1719 }
1720
1721 static int aarch64_examine_first(struct target *target)
1722 {
1723 struct aarch64_common *aarch64 = target_to_aarch64(target);
1724 struct armv8_common *armv8 = &aarch64->armv8_common;
1725 struct adiv5_dap *swjdp = armv8->arm.dap;
1726 int i;
1727 int retval = ERROR_OK;
1728 uint64_t debug, ttypr;
1729 uint32_t cpuid;
1730 uint32_t tmp0, tmp1;
1731 debug = ttypr = cpuid = 0;
1732
1733 /* We do one extra read to ensure DAP is configured,
1734 * we call ahbap_debugport_init(swjdp) instead
1735 */
1736 retval = dap_dp_init(swjdp);
1737 if (retval != ERROR_OK)
1738 return retval;
1739
1740 /* Search for the APB-AB - it is needed for access to debug registers */
1741 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
1742 if (retval != ERROR_OK) {
1743 LOG_ERROR("Could not find APB-AP for debug access");
1744 return retval;
1745 }
1746
1747 retval = mem_ap_init(armv8->debug_ap);
1748 if (retval != ERROR_OK) {
1749 LOG_ERROR("Could not initialize the APB-AP");
1750 return retval;
1751 }
1752
1753 armv8->debug_ap->memaccess_tck = 80;
1754
1755 if (!target->dbgbase_set) {
1756 uint32_t dbgbase;
1757 /* Get ROM Table base */
1758 uint32_t apid;
1759 int32_t coreidx = target->coreid;
1760 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
1761 if (retval != ERROR_OK)
1762 return retval;
1763 /* Lookup 0x15 -- Processor DAP */
1764 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
1765 &armv8->debug_base, &coreidx);
1766 if (retval != ERROR_OK)
1767 return retval;
1768 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
1769 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
1770 } else
1771 armv8->debug_base = target->dbgbase;
1772
1773 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1774 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
1775 if (retval != ERROR_OK) {
1776 LOG_DEBUG("LOCK debug access fail");
1777 return retval;
1778 }
1779
1780 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1781 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
1782 if (retval != ERROR_OK) {
1783 LOG_DEBUG("Examine %s failed", "oslock");
1784 return retval;
1785 }
1786
1787 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1788 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
1789 if (retval != ERROR_OK) {
1790 LOG_DEBUG("Examine %s failed", "CPUID");
1791 return retval;
1792 }
1793
1794 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1795 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
1796 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1797 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
1798 if (retval != ERROR_OK) {
1799 LOG_DEBUG("Examine %s failed", "Memory Model Type");
1800 return retval;
1801 }
1802 ttypr |= tmp1;
1803 ttypr = (ttypr << 32) | tmp0;
1804
1805 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1806 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
1807 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1808 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
1809 if (retval != ERROR_OK) {
1810 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
1811 return retval;
1812 }
1813 debug |= tmp1;
1814 debug = (debug << 32) | tmp0;
1815
1816 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1817 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
1818 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
1819
1820 if (target->ctibase == 0) {
1821 /* assume a v8 rom table layout */
1822 armv8->cti_base = target->ctibase = armv8->debug_base + 0x10000;
1823 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, target->ctibase);
1824 } else
1825 armv8->cti_base = target->ctibase;
1826
1827 armv8->arm.core_type = ARM_MODE_MON;
1828 retval = aarch64_dpm_setup(aarch64, debug);
1829 if (retval != ERROR_OK)
1830 return retval;
1831
1832 /* Setup Breakpoint Register Pairs */
1833 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
1834 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
1835 aarch64->brp_num_available = aarch64->brp_num;
1836 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
1837 for (i = 0; i < aarch64->brp_num; i++) {
1838 aarch64->brp_list[i].used = 0;
1839 if (i < (aarch64->brp_num-aarch64->brp_num_context))
1840 aarch64->brp_list[i].type = BRP_NORMAL;
1841 else
1842 aarch64->brp_list[i].type = BRP_CONTEXT;
1843 aarch64->brp_list[i].value = 0;
1844 aarch64->brp_list[i].control = 0;
1845 aarch64->brp_list[i].BRPn = i;
1846 }
1847
1848 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
1849
1850 target_set_examined(target);
1851 return ERROR_OK;
1852 }
1853
1854 static int aarch64_examine(struct target *target)
1855 {
1856 int retval = ERROR_OK;
1857
1858 /* don't re-probe hardware after each reset */
1859 if (!target_was_examined(target))
1860 retval = aarch64_examine_first(target);
1861
1862 /* Configure core debug access */
1863 if (retval == ERROR_OK)
1864 retval = aarch64_init_debug_access(target);
1865
1866 return retval;
1867 }
1868
1869 /*
1870 * Cortex-A8 target creation and initialization
1871 */
1872
1873 static int aarch64_init_target(struct command_context *cmd_ctx,
1874 struct target *target)
1875 {
1876 /* examine_first() does a bunch of this */
1877 return ERROR_OK;
1878 }
1879
1880 static int aarch64_init_arch_info(struct target *target,
1881 struct aarch64_common *aarch64, struct jtag_tap *tap)
1882 {
1883 struct armv8_common *armv8 = &aarch64->armv8_common;
1884 struct adiv5_dap *dap = armv8->arm.dap;
1885
1886 armv8->arm.dap = dap;
1887
1888 /* Setup struct aarch64_common */
1889 aarch64->common_magic = AARCH64_COMMON_MAGIC;
1890 /* tap has no dap initialized */
1891 if (!tap->dap) {
1892 tap->dap = dap_init();
1893
1894 /* Leave (only) generic DAP stuff for debugport_init() */
1895 tap->dap->tap = tap;
1896 }
1897
1898 armv8->arm.dap = tap->dap;
1899
1900 /* register arch-specific functions */
1901 armv8->examine_debug_reason = NULL;
1902
1903 armv8->post_debug_entry = aarch64_post_debug_entry;
1904
1905 armv8->pre_restore_context = NULL;
1906
1907 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
1908
1909 /* REVISIT v7a setup should be in a v7a-specific routine */
1910 armv8_init_arch_info(target, armv8);
1911 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
1912
1913 return ERROR_OK;
1914 }
1915
1916 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
1917 {
1918 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
1919
1920 return aarch64_init_arch_info(target, aarch64, target->tap);
1921 }
1922
1923 static int aarch64_mmu(struct target *target, int *enabled)
1924 {
1925 if (target->state != TARGET_HALTED) {
1926 LOG_ERROR("%s: target not halted", __func__);
1927 return ERROR_TARGET_INVALID;
1928 }
1929
1930 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
1931 return ERROR_OK;
1932 }
1933
1934 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
1935 target_addr_t *phys)
1936 {
1937 return armv8_mmu_translate_va_pa(target, virt, phys, 1);
1938 }
1939
1940 COMMAND_HANDLER(aarch64_handle_cache_info_command)
1941 {
1942 struct target *target = get_current_target(CMD_CTX);
1943 struct armv8_common *armv8 = target_to_armv8(target);
1944
1945 return armv8_handle_cache_info_command(CMD_CTX,
1946 &armv8->armv8_mmu.armv8_cache);
1947 }
1948
1949
1950 COMMAND_HANDLER(aarch64_handle_dbginit_command)
1951 {
1952 struct target *target = get_current_target(CMD_CTX);
1953 if (!target_was_examined(target)) {
1954 LOG_ERROR("target not examined yet");
1955 return ERROR_FAIL;
1956 }
1957
1958 return aarch64_init_debug_access(target);
1959 }
1960 COMMAND_HANDLER(aarch64_handle_smp_off_command)
1961 {
1962 struct target *target = get_current_target(CMD_CTX);
1963 /* check target is an smp target */
1964 struct target_list *head;
1965 struct target *curr;
1966 head = target->head;
1967 target->smp = 0;
1968 if (head != (struct target_list *)NULL) {
1969 while (head != (struct target_list *)NULL) {
1970 curr = head->target;
1971 curr->smp = 0;
1972 head = head->next;
1973 }
1974 /* fixes the target display to the debugger */
1975 target->gdb_service->target = target;
1976 }
1977 return ERROR_OK;
1978 }
1979
1980 COMMAND_HANDLER(aarch64_handle_smp_on_command)
1981 {
1982 struct target *target = get_current_target(CMD_CTX);
1983 struct target_list *head;
1984 struct target *curr;
1985 head = target->head;
1986 if (head != (struct target_list *)NULL) {
1987 target->smp = 1;
1988 while (head != (struct target_list *)NULL) {
1989 curr = head->target;
1990 curr->smp = 1;
1991 head = head->next;
1992 }
1993 }
1994 return ERROR_OK;
1995 }
1996
1997 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
1998 {
1999 struct target *target = get_current_target(CMD_CTX);
2000 int retval = ERROR_OK;
2001 struct target_list *head;
2002 head = target->head;
2003 if (head != (struct target_list *)NULL) {
2004 if (CMD_ARGC == 1) {
2005 int coreid = 0;
2006 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2007 if (ERROR_OK != retval)
2008 return retval;
2009 target->gdb_service->core[1] = coreid;
2010
2011 }
2012 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2013 , target->gdb_service->core[1]);
2014 }
2015 return ERROR_OK;
2016 }
2017
2018 static const struct command_registration aarch64_exec_command_handlers[] = {
2019 {
2020 .name = "cache_info",
2021 .handler = aarch64_handle_cache_info_command,
2022 .mode = COMMAND_EXEC,
2023 .help = "display information about target caches",
2024 .usage = "",
2025 },
2026 {
2027 .name = "dbginit",
2028 .handler = aarch64_handle_dbginit_command,
2029 .mode = COMMAND_EXEC,
2030 .help = "Initialize core debug",
2031 .usage = "",
2032 },
2033 { .name = "smp_off",
2034 .handler = aarch64_handle_smp_off_command,
2035 .mode = COMMAND_EXEC,
2036 .help = "Stop smp handling",
2037 .usage = "",
2038 },
2039 {
2040 .name = "smp_on",
2041 .handler = aarch64_handle_smp_on_command,
2042 .mode = COMMAND_EXEC,
2043 .help = "Restart smp handling",
2044 .usage = "",
2045 },
2046 {
2047 .name = "smp_gdb",
2048 .handler = aarch64_handle_smp_gdb_command,
2049 .mode = COMMAND_EXEC,
2050 .help = "display/fix current core played to gdb",
2051 .usage = "",
2052 },
2053
2054
2055 COMMAND_REGISTRATION_DONE
2056 };
/* Top-level command registration: chains in the generic armv8 commands
 * plus the "aarch64" group defined above. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = armv8_command_handlers,
	},
	{
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "Aarch64 command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2070
/* Target type descriptor: binds the aarch64 implementation functions
 * into OpenOCD's generic target framework. */
struct target_type aarch64_target = {
	.name = "aarch64",

	/* state polling and reporting */
	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	/* run control */
	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	/* virtual-address memory access */
	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	/* breakpoints; watchpoints are not implemented for this target */
	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	/* lifecycle */
	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	/* physical-address access and address translation */
	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)