aarch64: refactor SCTLR manipulation
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
33 static int aarch64_poll(struct target *target);
34 static int aarch64_debug_entry(struct target *target);
35 static int aarch64_restore_context(struct target *target, bool bpwp);
36 static int aarch64_set_breakpoint(struct target *target,
37 struct breakpoint *breakpoint, uint8_t matchmode);
38 static int aarch64_set_context_breakpoint(struct target *target,
39 struct breakpoint *breakpoint, uint8_t matchmode);
40 static int aarch64_set_hybrid_breakpoint(struct target *target,
41 struct breakpoint *breakpoint);
42 static int aarch64_unset_breakpoint(struct target *target,
43 struct breakpoint *breakpoint);
44 static int aarch64_mmu(struct target *target, int *enabled);
45 static int aarch64_virt2phys(struct target *target,
46 target_addr_t virt, target_addr_t *phys);
47 static int aarch64_read_apb_ap_memory(struct target *target,
48 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
49
/*
 * Write the cached SCTLR value back to the core if it was modified while
 * halted (e.g. by aarch64_mmu_modify).  Uses R0 via the DPM, so it must run
 * before the register context is restored.  Returns ERROR_OK, a DPM error,
 * or ERROR_FAIL if the current core mode has no known SCTLR access.
 */
static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	/* only touch the core if the shadow copy diverged from the HW copy */
	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		/* pick the MSR/MCR encoding matching the current exception level */
		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			/* EL0 cannot write SCTLR_EL1; temporarily switch to EL1H */
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
			/* AArch32 modes: SCTLR is CP15 c1,0,0,0 */
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			LOG_INFO("cannot read system control register in this mode");
			return ERROR_FAIL;
		}

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		/* restore the mode we entered with */
		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}
105
/* check address before aarch64_apb read write access with mmu on
 * remove apb predictible data abort */
static int aarch64_check_address(struct target *target, uint32_t address)
{
	/* TODO: validate that 'address' is actually mapped before issuing an
	 * APB access, so a predictable data abort can be avoided.  Currently
	 * a no-op that always reports success. */
	return ERROR_OK;
}
113 /* modify system_control_reg in order to enable or disable mmu for :
114 * - virt2phys address conversion
115 * - read or write memory in phys or virt address */
116 static int aarch64_mmu_modify(struct target *target, int enable)
117 {
118 struct aarch64_common *aarch64 = target_to_aarch64(target);
119 struct armv8_common *armv8 = &aarch64->armv8_common;
120 int retval = ERROR_OK;
121 uint32_t instr = 0;
122
123 if (enable) {
124 /* if mmu enabled at target stop and mmu not enable */
125 if (!(aarch64->system_control_reg & 0x1U)) {
126 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
127 return ERROR_FAIL;
128 }
129 if (!(aarch64->system_control_reg_curr & 0x1U))
130 aarch64->system_control_reg_curr |= 0x1U;
131 } else {
132 if (aarch64->system_control_reg_curr & 0x4U) {
133 /* data cache is active */
134 aarch64->system_control_reg_curr &= ~0x4U;
135 /* flush data cache armv8 function to be called */
136 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
137 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
138 }
139 if ((aarch64->system_control_reg_curr & 0x1U)) {
140 aarch64->system_control_reg_curr &= ~0x1U;
141 }
142 }
143
144 switch (armv8->arm.core_mode) {
145 case ARMV8_64_EL0T:
146 case ARMV8_64_EL1T:
147 case ARMV8_64_EL1H:
148 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
149 break;
150 case ARMV8_64_EL2T:
151 case ARMV8_64_EL2H:
152 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
153 break;
154 case ARMV8_64_EL3H:
155 case ARMV8_64_EL3T:
156 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
157 break;
158 default:
159 LOG_DEBUG("unknown cpu state 0x%x" PRIx32, armv8->arm.core_state);
160 break;
161 }
162
163 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
164 aarch64->system_control_reg_curr);
165 return retval;
166 }
167
/*
 * Basic debug access, very low level assumes state is saved
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_CTR, 1);
	/* By default, gate all channel triggers to and from the CTM */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, 0);
	/* output halt requests to PE on channel 0 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return aarch64_poll(target);
}
216
217 /* Write to memory mapped registers directly with no cache or mmu handling */
218 static int aarch64_dap_write_memap_register_u32(struct target *target,
219 uint32_t address,
220 uint32_t value)
221 {
222 int retval;
223 struct armv8_common *armv8 = target_to_armv8(target);
224
225 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
226
227 return retval;
228 }
229
230 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
231 {
232 struct arm_dpm *dpm = &a8->armv8_common.dpm;
233 int retval;
234
235 dpm->arm = &a8->armv8_common.arm;
236 dpm->didr = debug;
237
238 retval = armv8_dpm_setup(dpm);
239 if (retval == ERROR_OK)
240 retval = armv8_dpm_initialize(dpm);
241
242 return retval;
243 }
244
245 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
246 {
247 struct armv8_common *armv8 = target_to_armv8(target);
248 uint32_t dscr;
249
250 /* Read DSCR */
251 int retval = mem_ap_read_atomic_u32(armv8->debug_ap,
252 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
253 if (ERROR_OK != retval)
254 return retval;
255
256 /* clear bitfield */
257 dscr &= ~bit_mask;
258 /* put new value */
259 dscr |= value & bit_mask;
260
261 /* write new DSCR */
262 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
263 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
264 return retval;
265 }
266
267 static struct target *get_aarch64(struct target *target, int32_t coreid)
268 {
269 struct target_list *head;
270 struct target *curr;
271
272 head = target->head;
273 while (head != (struct target_list *)NULL) {
274 curr = head->target;
275 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
276 return curr;
277 head = head->next;
278 }
279 return target;
280 }
281 static int aarch64_halt(struct target *target);
282
283 static int aarch64_halt_smp(struct target *target)
284 {
285 int retval = ERROR_OK;
286 struct target_list *head = target->head;
287
288 while (head != (struct target_list *)NULL) {
289 struct target *curr = head->target;
290 struct armv8_common *armv8 = target_to_armv8(curr);
291
292 /* open the gate for channel 0 to let HALT requests pass to the CTM */
293 if (curr->smp) {
294 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
295 armv8->cti_base + CTI_GATE, CTI_CHNL(0));
296 if (retval == ERROR_OK)
297 retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
298 }
299 if (retval != ERROR_OK)
300 break;
301
302 head = head->next;
303 }
304
305 /* halt the target PE */
306 if (retval == ERROR_OK)
307 retval = aarch64_halt(target);
308
309 return retval;
310 }
311
312 static int update_halt_gdb(struct target *target)
313 {
314 int retval = 0;
315 if (target->gdb_service && target->gdb_service->core[0] == -1) {
316 target->gdb_service->target = target;
317 target->gdb_service->core[0] = target->coreid;
318 retval += aarch64_halt_smp(target);
319 }
320 return retval;
321 }
322
323 /*
324 * Cortex-A8 Run control
325 */
326
/*
 * Poll the core's debug status and update target->state accordingly.
 * On a transition into the halted state, performs debug entry, halts the
 * rest of the SMP group and fires HALTED/DEBUG_HALTED event callbacks.
 */
static int aarch64_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_aarch64(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache DSCR for aarch64_debug_entry() */
	aarch64->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == 0x3) {
		/* core is in debug state */
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target %s halted", target_name(target));
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
392
/*
 * Halt the core by pulsing CTI channel 0 (wired to the PE's halt request
 * in aarch64_init_debug_access) and wait up to 1 s for DSCR to report the
 * halted state.
 */
static int aarch64_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv8_common *armv8 = target_to_armv8(target);

	/*
	 * add HDE in halting debug mode
	 */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_APPPULSE, CTI_CHNL(0));
	if (retval != ERROR_OK)
		return retval;

	/* poll DSCR until the core reports halted, with a 1 s timeout */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCRV8_HALT_MASK) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
430
/*
 * Prepare the core for resuming: fix up and write back the PC, restore
 * the (possibly modified) SCTLR and flush dirty registers to the core.
 * current = 1: resume at the current PC (written back through *address);
 * otherwise resume at *address.  Does not actually restart the core.
 */
static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	/* mark the PC dirty so it is written back with the rest of the context */
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
483
/*
 * Restart the core via CTI channel 1.  With slave_pe set, only the CTI
 * gate is opened so the PE restarts together with the master when the
 * master pulses the channel; otherwise the pulse is issued here and the
 * function waits (1 s timeout) for the core to leave debug state.
 * Invalidates the register caches since the core starts running.
 */
static int aarch64_internal_restart(struct target *target, bool slave_pe)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* make sure to acknowledge the halt event before resuming */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_INACK, CTI_TRIG(HALT));

	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs
	 */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	if (!slave_pe) {
		/* trigger an event on channel 1, generates a restart request to the PE */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_APPPULSE, CTI_CHNL(1));
		if (retval != ERROR_OK)
			return retval;

		/* wait for the core to acknowledge the restart, 1 s timeout */
		long long then = timeval_ms();
		for (;; ) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			if (retval != ERROR_OK)
				return retval;
			if ((dscr & DSCR_HDE) != 0)
				break;
			if (timeval_ms() > then + 1000) {
				LOG_ERROR("Timeout waiting for resume");
				return ERROR_FAIL;
			}
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);
	register_cache_invalidate(arm->core_cache->next);

	return ERROR_OK;
}
553
554 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
555 {
556 int retval = 0;
557 struct target_list *head;
558 struct target *curr;
559 uint64_t address;
560 head = target->head;
561 while (head != (struct target_list *)NULL) {
562 curr = head->target;
563 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
564 /* resume current address , not in step mode */
565 retval += aarch64_internal_restore(curr, 1, &address,
566 handle_breakpoints, 0);
567 retval += aarch64_internal_restart(curr, true);
568 }
569 head = head->next;
570
571 }
572 return retval;
573 }
574
575 static int aarch64_resume(struct target *target, int current,
576 target_addr_t address, int handle_breakpoints, int debug_execution)
577 {
578 int retval = 0;
579 uint64_t addr = address;
580
581 /* dummy resume for smp toggle in order to reduce gdb impact */
582 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
583 /* simulate a start and halt of target */
584 target->gdb_service->target = NULL;
585 target->gdb_service->core[0] = target->gdb_service->core[1];
586 /* fake resume at next poll we play the target core[1], see poll*/
587 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
588 return 0;
589 }
590
591 if (target->state != TARGET_HALTED)
592 return ERROR_TARGET_NOT_HALTED;
593
594 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
595 debug_execution);
596 if (target->smp) {
597 target->gdb_service->core[0] = -1;
598 retval = aarch64_restore_smp(target, handle_breakpoints);
599 if (retval != ERROR_OK)
600 return retval;
601 }
602 aarch64_internal_restart(target, false);
603
604 if (!debug_execution) {
605 target->state = TARGET_RUNNING;
606 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
607 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
608 } else {
609 target->state = TARGET_DEBUG_RUNNING;
610 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
611 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
612 }
613
614 return ERROR_OK;
615 }
616
/*
 * Perform the work needed after the core entered debug state: select the
 * correct opcode/register access layer for the detected core state, clear
 * sticky errors, decode the halt reason from DSCR, read WFAR on a
 * watchpoint hit and read back the full register context.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), aarch64->cpudbg_dscr);

	/* cpudbg_dscr was cached by aarch64_poll() */
	dpm->dscr = aarch64->cpudbg_dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);

	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);

	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, aarch64->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		/* WFAR is 64 bit wide, read high word first */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR1,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	/* let the target-specific hook (aarch64_post_debug_entry) run last */
	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
674
/*
 * Read the SCTLR of the current exception level into the cached copies and
 * derive MMU/cache enable flags from it.  Also identifies the cache
 * geometry and MPIDR on first entry.  Counterpart of
 * aarch64_restore_system_control_reg().
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	/* pick the MRS/MRC encoding matching the current exception level */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* EL0 cannot read SCTLR_EL1; temporarily switch to EL1H */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
		/* AArch32 modes: SCTLR is CP15 c1,0,0,0 */
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_INFO("cannot read system control register in this mode");
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* info == -1 means the cache has not been identified yet */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	/* SCTLR bit 0: MMU enable, bit 2: D-cache enable, bit 12: I-cache enable */
	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
		(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
		(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	aarch64->curr_mode = armv8->arm.core_mode;
	return ERROR_OK;
}
739
740 static int aarch64_step(struct target *target, int current, target_addr_t address,
741 int handle_breakpoints)
742 {
743 struct armv8_common *armv8 = target_to_armv8(target);
744 int retval;
745 uint32_t edecr;
746
747 if (target->state != TARGET_HALTED) {
748 LOG_WARNING("target not halted");
749 return ERROR_TARGET_NOT_HALTED;
750 }
751
752 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
753 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
754 if (retval != ERROR_OK)
755 return retval;
756
757 /* make sure EDECR.SS is not set when restoring the register */
758 edecr &= ~0x4;
759
760 /* set EDECR.SS to enter hardware step mode */
761 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
762 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
763 if (retval != ERROR_OK)
764 return retval;
765
766 /* disable interrupts while stepping */
767 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
768 if (retval != ERROR_OK)
769 return ERROR_OK;
770
771 /* resume the target */
772 retval = aarch64_resume(target, current, address, 0, 0);
773 if (retval != ERROR_OK)
774 return retval;
775
776 long long then = timeval_ms();
777 while (target->state != TARGET_HALTED) {
778 retval = aarch64_poll(target);
779 if (retval != ERROR_OK)
780 return retval;
781 if (timeval_ms() > then + 1000) {
782 LOG_ERROR("timeout waiting for target halt");
783 return ERROR_FAIL;
784 }
785 }
786
787 /* restore EDECR */
788 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
789 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
790 if (retval != ERROR_OK)
791 return retval;
792
793 /* restore interrupts */
794 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
795 if (retval != ERROR_OK)
796 return ERROR_OK;
797
798 return ERROR_OK;
799 }
800
/*
 * Write every dirty register back to the core before resuming, after
 * giving the target a chance to run its pre-restore hook.  'bpwp'
 * selects whether breakpoint/watchpoint registers are set up as well.
 */
static int aarch64_restore_context(struct target *target, bool bpwp)
{
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG("%s", target_name(target));

	if (armv8->pre_restore_context)
		armv8->pre_restore_context(target);

	return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
}
812
813 /*
814 * Cortex-A8 Breakpoint and watchpoint functions
815 */
816
817 /* Setup hardware Breakpoint Register Pair */
818 static int aarch64_set_breakpoint(struct target *target,
819 struct breakpoint *breakpoint, uint8_t matchmode)
820 {
821 int retval;
822 int brp_i = 0;
823 uint32_t control;
824 uint8_t byte_addr_select = 0x0F;
825 struct aarch64_common *aarch64 = target_to_aarch64(target);
826 struct armv8_common *armv8 = &aarch64->armv8_common;
827 struct aarch64_brp *brp_list = aarch64->brp_list;
828
829 if (breakpoint->set) {
830 LOG_WARNING("breakpoint already set");
831 return ERROR_OK;
832 }
833
834 if (breakpoint->type == BKPT_HARD) {
835 int64_t bpt_value;
836 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
837 brp_i++;
838 if (brp_i >= aarch64->brp_num) {
839 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
840 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
841 }
842 breakpoint->set = brp_i + 1;
843 if (breakpoint->length == 2)
844 byte_addr_select = (3 << (breakpoint->address & 0x02));
845 control = ((matchmode & 0x7) << 20)
846 | (1 << 13)
847 | (byte_addr_select << 5)
848 | (3 << 1) | 1;
849 brp_list[brp_i].used = 1;
850 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
851 brp_list[brp_i].control = control;
852 bpt_value = brp_list[brp_i].value;
853
854 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
855 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
856 (uint32_t)(bpt_value & 0xFFFFFFFF));
857 if (retval != ERROR_OK)
858 return retval;
859 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
860 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
861 (uint32_t)(bpt_value >> 32));
862 if (retval != ERROR_OK)
863 return retval;
864
865 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
866 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
867 brp_list[brp_i].control);
868 if (retval != ERROR_OK)
869 return retval;
870 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
871 brp_list[brp_i].control,
872 brp_list[brp_i].value);
873
874 } else if (breakpoint->type == BKPT_SOFT) {
875 uint8_t code[4];
876
877 buf_set_u32(code, 0, 32, armv8_opcode(armv8, ARMV8_OPC_HLT));
878 retval = target_read_memory(target,
879 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
880 breakpoint->length, 1,
881 breakpoint->orig_instr);
882 if (retval != ERROR_OK)
883 return retval;
884
885 armv8_cache_d_inner_flush_virt(armv8,
886 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
887 breakpoint->length);
888
889 retval = target_write_memory(target,
890 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
891 breakpoint->length, 1, code);
892 if (retval != ERROR_OK)
893 return retval;
894
895 armv8_cache_d_inner_flush_virt(armv8,
896 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
897 breakpoint->length);
898
899 armv8_cache_i_inner_inval_virt(armv8,
900 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
901 breakpoint->length);
902
903 breakpoint->set = 0x11; /* Any nice value but 0 */
904 }
905
906 /* Ensure that halting debug mode is enable */
907 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
908 if (retval != ERROR_OK) {
909 LOG_DEBUG("Failed to set DSCR.HDE");
910 return retval;
911 }
912
913 return ERROR_OK;
914 }
915
916 static int aarch64_set_context_breakpoint(struct target *target,
917 struct breakpoint *breakpoint, uint8_t matchmode)
918 {
919 int retval = ERROR_FAIL;
920 int brp_i = 0;
921 uint32_t control;
922 uint8_t byte_addr_select = 0x0F;
923 struct aarch64_common *aarch64 = target_to_aarch64(target);
924 struct armv8_common *armv8 = &aarch64->armv8_common;
925 struct aarch64_brp *brp_list = aarch64->brp_list;
926
927 if (breakpoint->set) {
928 LOG_WARNING("breakpoint already set");
929 return retval;
930 }
931 /*check available context BRPs*/
932 while ((brp_list[brp_i].used ||
933 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
934 brp_i++;
935
936 if (brp_i >= aarch64->brp_num) {
937 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
938 return ERROR_FAIL;
939 }
940
941 breakpoint->set = brp_i + 1;
942 control = ((matchmode & 0x7) << 20)
943 | (1 << 13)
944 | (byte_addr_select << 5)
945 | (3 << 1) | 1;
946 brp_list[brp_i].used = 1;
947 brp_list[brp_i].value = (breakpoint->asid);
948 brp_list[brp_i].control = control;
949 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
950 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
951 brp_list[brp_i].value);
952 if (retval != ERROR_OK)
953 return retval;
954 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
955 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
956 brp_list[brp_i].control);
957 if (retval != ERROR_OK)
958 return retval;
959 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
960 brp_list[brp_i].control,
961 brp_list[brp_i].value);
962 return ERROR_OK;
963
964 }
965
966 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
967 {
968 int retval = ERROR_FAIL;
969 int brp_1 = 0; /* holds the contextID pair */
970 int brp_2 = 0; /* holds the IVA pair */
971 uint32_t control_CTX, control_IVA;
972 uint8_t CTX_byte_addr_select = 0x0F;
973 uint8_t IVA_byte_addr_select = 0x0F;
974 uint8_t CTX_machmode = 0x03;
975 uint8_t IVA_machmode = 0x01;
976 struct aarch64_common *aarch64 = target_to_aarch64(target);
977 struct armv8_common *armv8 = &aarch64->armv8_common;
978 struct aarch64_brp *brp_list = aarch64->brp_list;
979
980 if (breakpoint->set) {
981 LOG_WARNING("breakpoint already set");
982 return retval;
983 }
984 /*check available context BRPs*/
985 while ((brp_list[brp_1].used ||
986 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
987 brp_1++;
988
989 printf("brp(CTX) found num: %d\n", brp_1);
990 if (brp_1 >= aarch64->brp_num) {
991 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
992 return ERROR_FAIL;
993 }
994
995 while ((brp_list[brp_2].used ||
996 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
997 brp_2++;
998
999 printf("brp(IVA) found num: %d\n", brp_2);
1000 if (brp_2 >= aarch64->brp_num) {
1001 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1002 return ERROR_FAIL;
1003 }
1004
1005 breakpoint->set = brp_1 + 1;
1006 breakpoint->linked_BRP = brp_2;
1007 control_CTX = ((CTX_machmode & 0x7) << 20)
1008 | (brp_2 << 16)
1009 | (0 << 14)
1010 | (CTX_byte_addr_select << 5)
1011 | (3 << 1) | 1;
1012 brp_list[brp_1].used = 1;
1013 brp_list[brp_1].value = (breakpoint->asid);
1014 brp_list[brp_1].control = control_CTX;
1015 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1016 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1017 brp_list[brp_1].value);
1018 if (retval != ERROR_OK)
1019 return retval;
1020 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1021 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1022 brp_list[brp_1].control);
1023 if (retval != ERROR_OK)
1024 return retval;
1025
1026 control_IVA = ((IVA_machmode & 0x7) << 20)
1027 | (brp_1 << 16)
1028 | (1 << 13)
1029 | (IVA_byte_addr_select << 5)
1030 | (3 << 1) | 1;
1031 brp_list[brp_2].used = 1;
1032 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1033 brp_list[brp_2].control = control_IVA;
1034 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1035 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1036 brp_list[brp_2].value & 0xFFFFFFFF);
1037 if (retval != ERROR_OK)
1038 return retval;
1039 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1040 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1041 brp_list[brp_2].value >> 32);
1042 if (retval != ERROR_OK)
1043 return retval;
1044 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1045 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1046 brp_list[brp_2].control);
1047 if (retval != ERROR_OK)
1048 return retval;
1049
1050 return ERROR_OK;
1051 }
1052
/*
 * Remove a previously-installed breakpoint.
 *
 * For BKPT_HARD breakpoints the comparator register pair(s) are
 * disabled (BCR cleared first, then both BVR words zeroed) and the
 * book-keeping in aarch64->brp_list is reset.  A hybrid breakpoint
 * (address != 0 && asid != 0) releases two pairs: the context-ID pair
 * recorded in breakpoint->set and the linked IVA pair recorded in
 * breakpoint->linked_BRP.  For soft breakpoints the saved original
 * instruction is written back and the d-/i-caches are maintained.
 *
 * Returns ERROR_OK on success (also when the breakpoint was not set or
 * carries an invalid BRP index, which are only logged), otherwise the
 * error from the debug register or memory writes.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			/* hybrid breakpoint: brp_i is the context-ID pair,
			 * brp_j the linked IVA pair */
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* control/value were cleared above, so these writes
			 * disable the comparator in hardware */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: release a single pair */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		/* flush d-cache so the write-back below hits memory the
		 * core will actually fetch from */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		/* invalidate i-cache so the restored instruction is fetched */
		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1185
1186 static int aarch64_add_breakpoint(struct target *target,
1187 struct breakpoint *breakpoint)
1188 {
1189 struct aarch64_common *aarch64 = target_to_aarch64(target);
1190
1191 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1192 LOG_INFO("no hardware breakpoint available");
1193 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1194 }
1195
1196 if (breakpoint->type == BKPT_HARD)
1197 aarch64->brp_num_available--;
1198
1199 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1200 }
1201
1202 static int aarch64_add_context_breakpoint(struct target *target,
1203 struct breakpoint *breakpoint)
1204 {
1205 struct aarch64_common *aarch64 = target_to_aarch64(target);
1206
1207 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1208 LOG_INFO("no hardware breakpoint available");
1209 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1210 }
1211
1212 if (breakpoint->type == BKPT_HARD)
1213 aarch64->brp_num_available--;
1214
1215 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1216 }
1217
1218 static int aarch64_add_hybrid_breakpoint(struct target *target,
1219 struct breakpoint *breakpoint)
1220 {
1221 struct aarch64_common *aarch64 = target_to_aarch64(target);
1222
1223 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1224 LOG_INFO("no hardware breakpoint available");
1225 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1226 }
1227
1228 if (breakpoint->type == BKPT_HARD)
1229 aarch64->brp_num_available--;
1230
1231 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1232 }
1233
1234
1235 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1236 {
1237 struct aarch64_common *aarch64 = target_to_aarch64(target);
1238
1239 #if 0
1240 /* It is perfectly possible to remove breakpoints while the target is running */
1241 if (target->state != TARGET_HALTED) {
1242 LOG_WARNING("target not halted");
1243 return ERROR_TARGET_NOT_HALTED;
1244 }
1245 #endif
1246
1247 if (breakpoint->set) {
1248 aarch64_unset_breakpoint(target, breakpoint);
1249 if (breakpoint->type == BKPT_HARD)
1250 aarch64->brp_num_available++;
1251 }
1252
1253 return ERROR_OK;
1254 }
1255
1256 /*
1257 * Cortex-A8 Reset functions
1258 */
1259
1260 static int aarch64_assert_reset(struct target *target)
1261 {
1262 struct armv8_common *armv8 = target_to_armv8(target);
1263
1264 LOG_DEBUG(" ");
1265
1266 /* FIXME when halt is requested, make it work somehow... */
1267
1268 /* Issue some kind of warm reset. */
1269 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1270 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1271 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1272 /* REVISIT handle "pulls" cases, if there's
1273 * hardware that needs them to work.
1274 */
1275 jtag_add_reset(0, 1);
1276 } else {
1277 LOG_ERROR("%s: how to reset?", target_name(target));
1278 return ERROR_FAIL;
1279 }
1280
1281 /* registers are now invalid */
1282 if (target_was_examined(target))
1283 register_cache_invalidate(armv8->arm.core_cache);
1284
1285 target->state = TARGET_RESET;
1286
1287 return ERROR_OK;
1288 }
1289
1290 static int aarch64_deassert_reset(struct target *target)
1291 {
1292 int retval;
1293
1294 LOG_DEBUG(" ");
1295
1296 /* be certain SRST is off */
1297 jtag_add_reset(0, 0);
1298
1299 if (!target_was_examined(target))
1300 return ERROR_OK;
1301
1302 retval = aarch64_poll(target);
1303 if (retval != ERROR_OK)
1304 return retval;
1305
1306 if (target->reset_halt) {
1307 if (target->state != TARGET_HALTED) {
1308 LOG_WARNING("%s: ran after reset and before halt ...",
1309 target_name(target));
1310 retval = target_halt(target);
1311 if (retval != ERROR_OK)
1312 return retval;
1313 }
1314 }
1315
1316 return ERROR_OK;
1317 }
1318
1319 static int aarch64_write_apb_ap_memory(struct target *target,
1320 uint64_t address, uint32_t size,
1321 uint32_t count, const uint8_t *buffer)
1322 {
1323 /* write memory through APB-AP */
1324 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1325 struct armv8_common *armv8 = target_to_armv8(target);
1326 struct arm_dpm *dpm = &armv8->dpm;
1327 struct arm *arm = &armv8->arm;
1328 int total_bytes = count * size;
1329 int total_u32;
1330 int start_byte = address & 0x3;
1331 int end_byte = (address + total_bytes) & 0x3;
1332 struct reg *reg;
1333 uint32_t dscr;
1334 uint8_t *tmp_buff = NULL;
1335
1336 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count %" PRIu32,
1337 address, size, count);
1338
1339 if (target->state != TARGET_HALTED) {
1340 LOG_WARNING("target not halted");
1341 return ERROR_TARGET_NOT_HALTED;
1342 }
1343
1344 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1345
1346 /* Mark register R0 as dirty, as it will be used
1347 * for transferring the data.
1348 * It will be restored automatically when exiting
1349 * debug mode
1350 */
1351 reg = armv8_reg_current(arm, 1);
1352 reg->dirty = true;
1353
1354 reg = armv8_reg_current(arm, 0);
1355 reg->dirty = true;
1356
1357 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1358
1359 /* The algorithm only copies 32 bit words, so the buffer
1360 * should be expanded to include the words at either end.
1361 * The first and last words will be read first to avoid
1362 * corruption if needed.
1363 */
1364 tmp_buff = malloc(total_u32 * 4);
1365
1366 if ((start_byte != 0) && (total_u32 > 1)) {
1367 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1368 * the other bytes in the word.
1369 */
1370 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1371 if (retval != ERROR_OK)
1372 goto error_free_buff_w;
1373 }
1374
1375 /* If end of write is not aligned, or the write is less than 4 bytes */
1376 if ((end_byte != 0) ||
1377 ((total_u32 == 1) && (total_bytes != 4))) {
1378
1379 /* Read the last word to avoid corruption during 32 bit write */
1380 int mem_offset = (total_u32-1) * 4;
1381 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1382 if (retval != ERROR_OK)
1383 goto error_free_buff_w;
1384 }
1385
1386 /* Copy the write buffer over the top of the temporary buffer */
1387 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1388
1389 /* We now have a 32 bit aligned buffer that can be written */
1390
1391 /* Read DSCR */
1392 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1393 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1394 if (retval != ERROR_OK)
1395 goto error_free_buff_w;
1396
1397 /* Set Normal access mode */
1398 dscr = (dscr & ~DSCR_MA);
1399 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1400 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1401
1402 if (arm->core_state == ARM_STATE_AARCH64) {
1403 /* Write X0 with value 'address' using write procedure */
1404 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1405 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1406 retval = dpm->instr_write_data_dcc_64(dpm,
1407 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
1408 } else {
1409 /* Write R0 with value 'address' using write procedure */
1410 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1411 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1412 dpm->instr_write_data_dcc(dpm,
1413 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
1414
1415 }
1416 /* Step 1.d - Change DCC to memory mode */
1417 dscr = dscr | DSCR_MA;
1418 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1419 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1420 if (retval != ERROR_OK)
1421 goto error_unset_dtr_w;
1422
1423
1424 /* Step 2.a - Do the write */
1425 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1426 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1427 if (retval != ERROR_OK)
1428 goto error_unset_dtr_w;
1429
1430 /* Step 3.a - Switch DTR mode back to Normal mode */
1431 dscr = (dscr & ~DSCR_MA);
1432 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1433 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1434 if (retval != ERROR_OK)
1435 goto error_unset_dtr_w;
1436
1437 /* Check for sticky abort flags in the DSCR */
1438 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1439 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1440 if (retval != ERROR_OK)
1441 goto error_free_buff_w;
1442
1443 dpm->dscr = dscr;
1444 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1445 /* Abort occurred - clear it and exit */
1446 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1447 armv8_dpm_handle_exception(dpm);
1448 goto error_free_buff_w;
1449 }
1450
1451 /* Done */
1452 free(tmp_buff);
1453 return ERROR_OK;
1454
1455 error_unset_dtr_w:
1456 /* Unset DTR mode */
1457 mem_ap_read_atomic_u32(armv8->debug_ap,
1458 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1459 dscr = (dscr & ~DSCR_MA);
1460 mem_ap_write_atomic_u32(armv8->debug_ap,
1461 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1462 error_free_buff_w:
1463 LOG_ERROR("error");
1464 free(tmp_buff);
1465 return ERROR_FAIL;
1466 }
1467
/*
 * Read target memory through the APB-AP using the DCC memory-access
 * mode (algorithm from ARM DDI0487A.g, chapter J9.1).
 *
 * Only 32-bit aligned words can be transferred; when address or length
 * is unaligned the data is read into a temporary buffer and the
 * requested byte range copied out at the end.  X0/X1 (R0/R1) are
 * marked dirty; they are restored automatically on debug-mode exit.
 *
 * Returns ERROR_OK on success; on any failure the DTR access mode is
 * restored to Normal and ERROR_FAIL is returned.
 */
static int aarch64_read_apb_ap_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int total_bytes = count * size;
	int total_u32;
	int start_byte = address & 0x3;
	int end_byte = (address + total_bytes) & 0x3;
	struct reg *reg;
	uint32_t dscr;
	uint8_t *tmp_buff = NULL;
	uint8_t *u8buf_ptr;
	uint32_t value;

	LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count %" PRIu32,
			address, size, count);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
	/* Mark register X0, X1 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	reg = armv8_reg_current(arm, 1);
	reg->dirty = true;

	reg = armv8_reg_current(arm, 0);
	reg->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval += mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval += dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	} else {
		/* AArch32 variant of the same sequence, using the CP14 DCC */
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval += dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);

	}
	/* any accumulated failure above aborts the transfer */
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Optimize the read as much as we can, either way we read in a single pass */
	if ((start_byte) || (end_byte)) {
		/* The algorithm only copies 32 bit words, so the buffer
		 * should be expanded to include the words at either end.
		 * The first and last words will be read into a temp buffer
		 * to avoid corruption
		 */
		tmp_buff = malloc(total_u32 * 4);
		if (!tmp_buff)
			goto error_unset_dtr_r;

		/* use the tmp buffer to read the entire data */
		u8buf_ptr = tmp_buff;
	} else
		/* address and read length are aligned so read directly into the passed buffer */
		u8buf_ptr = buffer;

	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
	 * increments X0 by 4. */
	retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
									armv8->debug_base + CPUV8_DBG_DTRTX);
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Step 3.a - set DTR access mode back to Normal mode	*/
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm);
		goto error_free_buff_r;
	}

	/* check if we need to copy aligned data by applying any shift necessary */
	if (tmp_buff) {
		memcpy(buffer, tmp_buff + start_byte, total_bytes);
		free(tmp_buff);
	}

	/* Done */
	return ERROR_OK;

error_unset_dtr_r:
	/* Unset DTR mode */
	mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_MA);
	mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
error_free_buff_r:
	LOG_ERROR("error");
	free(tmp_buff);
	return ERROR_FAIL;
}
1631
1632 static int aarch64_read_phys_memory(struct target *target,
1633 target_addr_t address, uint32_t size,
1634 uint32_t count, uint8_t *buffer)
1635 {
1636 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1637 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
1638 address, size, count);
1639
1640 if (count && buffer) {
1641 /* read memory through APB-AP */
1642 retval = aarch64_mmu_modify(target, 0);
1643 if (retval != ERROR_OK)
1644 return retval;
1645 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1646 }
1647 return retval;
1648 }
1649
1650 static int aarch64_read_memory(struct target *target, target_addr_t address,
1651 uint32_t size, uint32_t count, uint8_t *buffer)
1652 {
1653 int mmu_enabled = 0;
1654 int retval;
1655
1656 /* aarch64 handles unaligned memory access */
1657 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
1658 size, count);
1659
1660 /* determine if MMU was enabled on target stop */
1661 retval = aarch64_mmu(target, &mmu_enabled);
1662 if (retval != ERROR_OK)
1663 return retval;
1664
1665 if (mmu_enabled) {
1666 retval = aarch64_check_address(target, address);
1667 if (retval != ERROR_OK)
1668 return retval;
1669 /* enable MMU as we could have disabled it for phys access */
1670 retval = aarch64_mmu_modify(target, 1);
1671 if (retval != ERROR_OK)
1672 return retval;
1673 }
1674 return aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1675 }
1676
1677 static int aarch64_write_phys_memory(struct target *target,
1678 target_addr_t address, uint32_t size,
1679 uint32_t count, const uint8_t *buffer)
1680 {
1681 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1682
1683 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
1684 size, count);
1685
1686 if (count && buffer) {
1687 /* write memory through APB-AP */
1688 retval = aarch64_mmu_modify(target, 0);
1689 if (retval != ERROR_OK)
1690 return retval;
1691 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1692 }
1693
1694 return retval;
1695 }
1696
1697 static int aarch64_write_memory(struct target *target, target_addr_t address,
1698 uint32_t size, uint32_t count, const uint8_t *buffer)
1699 {
1700 int mmu_enabled = 0;
1701 int retval;
1702
1703 /* aarch64 handles unaligned memory access */
1704 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
1705 "; count %" PRId32, address, size, count);
1706
1707 /* determine if MMU was enabled on target stop */
1708 retval = aarch64_mmu(target, &mmu_enabled);
1709 if (retval != ERROR_OK)
1710 return retval;
1711
1712 if (mmu_enabled) {
1713 retval = aarch64_check_address(target, address);
1714 if (retval != ERROR_OK)
1715 return retval;
1716 /* enable MMU as we could have disabled it for phys access */
1717 retval = aarch64_mmu_modify(target, 1);
1718 if (retval != ERROR_OK)
1719 return retval;
1720 }
1721 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1722 }
1723
1724 static int aarch64_handle_target_request(void *priv)
1725 {
1726 struct target *target = priv;
1727 struct armv8_common *armv8 = target_to_armv8(target);
1728 int retval;
1729
1730 if (!target_was_examined(target))
1731 return ERROR_OK;
1732 if (!target->dbg_msg_enabled)
1733 return ERROR_OK;
1734
1735 if (target->state == TARGET_RUNNING) {
1736 uint32_t request;
1737 uint32_t dscr;
1738 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1739 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1740
1741 /* check if we have data */
1742 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
1743 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1744 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
1745 if (retval == ERROR_OK) {
1746 target_request(target, request);
1747 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1748 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1749 }
1750 }
1751 }
1752
1753 return ERROR_OK;
1754 }
1755
/*
 * First-time probe of the target: initialize the DAP, locate the
 * APB-AP and the core's debug base, unlock debug access, read the
 * identification registers (MIDR, ID_AA64MMFR0, ID_AA64DFR0), set up
 * the DPM and enumerate the hardware breakpoint register pairs.
 *
 * Returns ERROR_OK and marks the target examined on success, otherwise
 * the first error encountered.
 */
static int aarch64_examine_first(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct adiv5_dap *swjdp = armv8->arm.dap;
	int i;
	int retval = ERROR_OK;
	uint64_t debug, ttypr;
	uint32_t cpuid;
	uint32_t tmp0, tmp1;
	debug = ttypr = cpuid = 0;

	/* We do one extra read to ensure DAP is configured,
	 * we call ahbap_debugport_init(swjdp) instead
	 */
	retval = dap_dp_init(swjdp);
	if (retval != ERROR_OK)
		return retval;

	/* Search for the APB-AB - it is needed for access to debug registers */
	retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not find APB-AP for debug access");
		return retval;
	}

	retval = mem_ap_init(armv8->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not initialize the APB-AP");
		return retval;
	}

	armv8->debug_ap->memaccess_tck = 80;

	if (!target->dbgbase_set) {
		uint32_t dbgbase;
		/* Get ROM Table base */
		uint32_t apid;
		int32_t coreidx = target->coreid;
		retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
		if (retval != ERROR_OK)
			return retval;
		/* Lookup 0x15 -- Processor DAP */
		retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
				&armv8->debug_base, &coreidx);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
				" apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
	} else
		armv8->debug_base = target->dbgbase;

	/* unlock the debug registers (EDLAR key) */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
	if (retval != ERROR_OK) {
		LOG_DEBUG("LOCK debug access fail");
		return retval;
	}

	/* clear the OS lock so the external debugger may access the core */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "CPUID");
		return retval;
	}

	/* 64-bit ID registers are exposed as two 32-bit halves */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
	retval += mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "Memory Model Type");
		return retval;
	}
	ttypr |= tmp1;
	ttypr = (ttypr << 32) | tmp0;

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
	retval += mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
		return retval;
	}
	debug |= tmp1;
	debug = (debug << 32) | tmp0;

	LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
	LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
	LOG_DEBUG("debug = 0x%08" PRIx64, debug);

	if (target->ctibase == 0) {
		/* assume a v8 rom table layout */
		armv8->cti_base = target->ctibase = armv8->debug_base + 0x10000;
		LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, target->ctibase);
	} else
		armv8->cti_base = target->ctibase;

	armv8->arm.core_type = ARM_MODE_MON;
	retval = aarch64_dpm_setup(aarch64, debug);
	if (retval != ERROR_OK)
		return retval;

	/* Setup Breakpoint Register Pairs: counts come from ID_AA64DFR0
	 * fields (value is "number of pairs minus one") */
	aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
	aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
	aarch64->brp_num_available = aarch64->brp_num;
	aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
	for (i = 0; i < aarch64->brp_num; i++) {
		aarch64->brp_list[i].used = 0;
		/* context-capable pairs occupy the top of the range */
		if (i < (aarch64->brp_num-aarch64->brp_num_context))
			aarch64->brp_list[i].type = BRP_NORMAL;
		else
			aarch64->brp_list[i].type = BRP_CONTEXT;
		aarch64->brp_list[i].value = 0;
		aarch64->brp_list[i].control = 0;
		aarch64->brp_list[i].BRPn = i;
	}

	LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);

	target_set_examined(target);
	return ERROR_OK;
}
1888
1889 static int aarch64_examine(struct target *target)
1890 {
1891 int retval = ERROR_OK;
1892
1893 /* don't re-probe hardware after each reset */
1894 if (!target_was_examined(target))
1895 retval = aarch64_examine_first(target);
1896
1897 /* Configure core debug access */
1898 if (retval == ERROR_OK)
1899 retval = aarch64_init_debug_access(target);
1900
1901 return retval;
1902 }
1903
1904 /*
1905 * Cortex-A8 target creation and initialization
1906 */
1907
1908 static int aarch64_init_target(struct command_context *cmd_ctx,
1909 struct target *target)
1910 {
1911 /* examine_first() does a bunch of this */
1912 return ERROR_OK;
1913 }
1914
1915 static int aarch64_init_arch_info(struct target *target,
1916 struct aarch64_common *aarch64, struct jtag_tap *tap)
1917 {
1918 struct armv8_common *armv8 = &aarch64->armv8_common;
1919 struct adiv5_dap *dap = armv8->arm.dap;
1920
1921 armv8->arm.dap = dap;
1922
1923 /* Setup struct aarch64_common */
1924 aarch64->common_magic = AARCH64_COMMON_MAGIC;
1925 /* tap has no dap initialized */
1926 if (!tap->dap) {
1927 tap->dap = dap_init();
1928
1929 /* Leave (only) generic DAP stuff for debugport_init() */
1930 tap->dap->tap = tap;
1931 }
1932
1933 armv8->arm.dap = tap->dap;
1934
1935 aarch64->fast_reg_read = 0;
1936
1937 /* register arch-specific functions */
1938 armv8->examine_debug_reason = NULL;
1939
1940 armv8->post_debug_entry = aarch64_post_debug_entry;
1941
1942 armv8->pre_restore_context = NULL;
1943
1944 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
1945
1946 /* REVISIT v7a setup should be in a v7a-specific routine */
1947 armv8_init_arch_info(target, armv8);
1948 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
1949
1950 return ERROR_OK;
1951 }
1952
1953 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
1954 {
1955 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
1956
1957 return aarch64_init_arch_info(target, aarch64, target->tap);
1958 }
1959
1960 static int aarch64_mmu(struct target *target, int *enabled)
1961 {
1962 if (target->state != TARGET_HALTED) {
1963 LOG_ERROR("%s: target not halted", __func__);
1964 return ERROR_TARGET_INVALID;
1965 }
1966
1967 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
1968 return ERROR_OK;
1969 }
1970
1971 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
1972 target_addr_t *phys)
1973 {
1974 return armv8_mmu_translate_va_pa(target, virt, phys, 1);
1975 }
1976
1977 COMMAND_HANDLER(aarch64_handle_cache_info_command)
1978 {
1979 struct target *target = get_current_target(CMD_CTX);
1980 struct armv8_common *armv8 = target_to_armv8(target);
1981
1982 return armv8_handle_cache_info_command(CMD_CTX,
1983 &armv8->armv8_mmu.armv8_cache);
1984 }
1985
1986
1987 COMMAND_HANDLER(aarch64_handle_dbginit_command)
1988 {
1989 struct target *target = get_current_target(CMD_CTX);
1990 if (!target_was_examined(target)) {
1991 LOG_ERROR("target not examined yet");
1992 return ERROR_FAIL;
1993 }
1994
1995 return aarch64_init_debug_access(target);
1996 }
1997 COMMAND_HANDLER(aarch64_handle_smp_off_command)
1998 {
1999 struct target *target = get_current_target(CMD_CTX);
2000 /* check target is an smp target */
2001 struct target_list *head;
2002 struct target *curr;
2003 head = target->head;
2004 target->smp = 0;
2005 if (head != (struct target_list *)NULL) {
2006 while (head != (struct target_list *)NULL) {
2007 curr = head->target;
2008 curr->smp = 0;
2009 head = head->next;
2010 }
2011 /* fixes the target display to the debugger */
2012 target->gdb_service->target = target;
2013 }
2014 return ERROR_OK;
2015 }
2016
2017 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2018 {
2019 struct target *target = get_current_target(CMD_CTX);
2020 struct target_list *head;
2021 struct target *curr;
2022 head = target->head;
2023 if (head != (struct target_list *)NULL) {
2024 target->smp = 1;
2025 while (head != (struct target_list *)NULL) {
2026 curr = head->target;
2027 curr->smp = 1;
2028 head = head->next;
2029 }
2030 }
2031 return ERROR_OK;
2032 }
2033
2034 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2035 {
2036 struct target *target = get_current_target(CMD_CTX);
2037 int retval = ERROR_OK;
2038 struct target_list *head;
2039 head = target->head;
2040 if (head != (struct target_list *)NULL) {
2041 if (CMD_ARGC == 1) {
2042 int coreid = 0;
2043 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2044 if (ERROR_OK != retval)
2045 return retval;
2046 target->gdb_service->core[1] = coreid;
2047
2048 }
2049 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2050 , target->gdb_service->core[1]);
2051 }
2052 return ERROR_OK;
2053 }
2054
2055 static const struct command_registration aarch64_exec_command_handlers[] = {
2056 {
2057 .name = "cache_info",
2058 .handler = aarch64_handle_cache_info_command,
2059 .mode = COMMAND_EXEC,
2060 .help = "display information about target caches",
2061 .usage = "",
2062 },
2063 {
2064 .name = "dbginit",
2065 .handler = aarch64_handle_dbginit_command,
2066 .mode = COMMAND_EXEC,
2067 .help = "Initialize core debug",
2068 .usage = "",
2069 },
2070 { .name = "smp_off",
2071 .handler = aarch64_handle_smp_off_command,
2072 .mode = COMMAND_EXEC,
2073 .help = "Stop smp handling",
2074 .usage = "",
2075 },
2076 {
2077 .name = "smp_on",
2078 .handler = aarch64_handle_smp_on_command,
2079 .mode = COMMAND_EXEC,
2080 .help = "Restart smp handling",
2081 .usage = "",
2082 },
2083 {
2084 .name = "smp_gdb",
2085 .handler = aarch64_handle_smp_gdb_command,
2086 .mode = COMMAND_EXEC,
2087 .help = "display/fix current core played to gdb",
2088 .usage = "",
2089 },
2090
2091
2092 COMMAND_REGISTRATION_DONE
2093 };
/* Top-level command registration: chains the generic ARMv8 commands plus
 * this driver's own subcommands. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = armv8_command_handlers,
	},
	{
		/* NOTE(review): the group is still named "cortex_a" (with
		 * Cortex-A help text) although this is the aarch64 driver —
		 * apparently inherited from cortex_a.c.  Renaming it to
		 * "aarch64" would break existing user scripts, so it is
		 * flagged here rather than changed. */
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2107
/* OpenOCD target driver descriptor for "aarch64" targets: maps the generic
 * target operations onto this file's implementations (or the shared
 * armv8/arm helpers where no aarch64-specific code is needed). */
struct target_type aarch64_target = {
	.name = "aarch64",

	/* run control */
	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	/* virtual-address memory access */
	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* breakpoints; watchpoints are not implemented for this driver */
	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	/* lifecycle */
	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	/* physical-address memory access and address translation */
	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account and then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will link the new method to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)