armv8: factor out generic bit set/clr for debug registers
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
33 static int aarch64_poll(struct target *target);
34 static int aarch64_debug_entry(struct target *target);
35 static int aarch64_restore_context(struct target *target, bool bpwp);
36 static int aarch64_set_breakpoint(struct target *target,
37 struct breakpoint *breakpoint, uint8_t matchmode);
38 static int aarch64_set_context_breakpoint(struct target *target,
39 struct breakpoint *breakpoint, uint8_t matchmode);
40 static int aarch64_set_hybrid_breakpoint(struct target *target,
41 struct breakpoint *breakpoint);
42 static int aarch64_unset_breakpoint(struct target *target,
43 struct breakpoint *breakpoint);
44 static int aarch64_mmu(struct target *target, int *enabled);
45 static int aarch64_virt2phys(struct target *target,
46 target_addr_t virt, target_addr_t *phys);
47 static int aarch64_read_apb_ap_memory(struct target *target,
48 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
49
/*
 * Write the cached SCTLR value back to the core if it was changed while
 * the target was halted (e.g. by aarch64_mmu_modify()).  The write goes
 * through the DPM and clobbers r0, so this must run before the register
 * context is restored on resume.
 */
static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	/* nothing to do unless the cached value differs from what is
	 * currently programmed into the core */
	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		/* pick the SCTLR write instruction matching the exception
		 * level the core is currently executing at */
		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			/* SCTLR_EL1 is not accessible from EL0; switch to
			 * EL1h for the write, then back (see below) */
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
			/* AArch32 modes: SCTLR is written via cp15 c1 */
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			LOG_INFO("cannot read system control register in this mode");
			return ERROR_FAIL;
		}

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		/* restore the original core mode if we switched above */
		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}
105
106 /* modify system_control_reg in order to enable or disable mmu for :
107 * - virt2phys address conversion
108 * - read or write memory in phys or virt address */
109 static int aarch64_mmu_modify(struct target *target, int enable)
110 {
111 struct aarch64_common *aarch64 = target_to_aarch64(target);
112 struct armv8_common *armv8 = &aarch64->armv8_common;
113 int retval = ERROR_OK;
114 uint32_t instr = 0;
115
116 if (enable) {
117 /* if mmu enabled at target stop and mmu not enable */
118 if (!(aarch64->system_control_reg & 0x1U)) {
119 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
120 return ERROR_FAIL;
121 }
122 if (!(aarch64->system_control_reg_curr & 0x1U))
123 aarch64->system_control_reg_curr |= 0x1U;
124 } else {
125 if (aarch64->system_control_reg_curr & 0x4U) {
126 /* data cache is active */
127 aarch64->system_control_reg_curr &= ~0x4U;
128 /* flush data cache armv8 function to be called */
129 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
130 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
131 }
132 if ((aarch64->system_control_reg_curr & 0x1U)) {
133 aarch64->system_control_reg_curr &= ~0x1U;
134 }
135 }
136
137 switch (armv8->arm.core_mode) {
138 case ARMV8_64_EL0T:
139 case ARMV8_64_EL1T:
140 case ARMV8_64_EL1H:
141 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
142 break;
143 case ARMV8_64_EL2T:
144 case ARMV8_64_EL2H:
145 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
146 break;
147 case ARMV8_64_EL3H:
148 case ARMV8_64_EL3T:
149 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
150 break;
151 default:
152 LOG_DEBUG("unknown cpu state 0x%x" PRIx32, armv8->arm.core_state);
153 break;
154 }
155
156 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
157 aarch64->system_control_reg_curr);
158 return retval;
159 }
160
/*
 * Basic debug access, very low level assumes state is saved
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_CTR, 1);
	/* By default, gate all channel triggers to and from the CTM */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, 0);
	/* output halt requests to PE on channel 0 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN1, CTI_CHNL(1));
	/* any failure in the write sequence above aborts the init */
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return aarch64_poll(target);
}
209
210 /* Write to memory mapped registers directly with no cache or mmu handling */
211 static int aarch64_dap_write_memap_register_u32(struct target *target,
212 uint32_t address,
213 uint32_t value)
214 {
215 int retval;
216 struct armv8_common *armv8 = target_to_armv8(target);
217
218 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
219
220 return retval;
221 }
222
223 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
224 {
225 struct arm_dpm *dpm = &a8->armv8_common.dpm;
226 int retval;
227
228 dpm->arm = &a8->armv8_common.arm;
229 dpm->didr = debug;
230
231 retval = armv8_dpm_setup(dpm);
232 if (retval == ERROR_OK)
233 retval = armv8_dpm_initialize(dpm);
234
235 return retval;
236 }
237
238 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
239 {
240 struct armv8_common *armv8 = target_to_armv8(target);
241 return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
242 }
243
244 static struct target *get_aarch64(struct target *target, int32_t coreid)
245 {
246 struct target_list *head;
247 struct target *curr;
248
249 head = target->head;
250 while (head != (struct target_list *)NULL) {
251 curr = head->target;
252 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
253 return curr;
254 head = head->next;
255 }
256 return target;
257 }
258 static int aarch64_halt(struct target *target);
259
260 static int aarch64_halt_smp(struct target *target)
261 {
262 int retval = ERROR_OK;
263 struct target_list *head = target->head;
264
265 while (head != (struct target_list *)NULL) {
266 struct target *curr = head->target;
267 struct armv8_common *armv8 = target_to_armv8(curr);
268
269 /* open the gate for channel 0 to let HALT requests pass to the CTM */
270 if (curr->smp) {
271 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
272 armv8->cti_base + CTI_GATE, CTI_CHNL(0));
273 if (retval == ERROR_OK)
274 retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
275 }
276 if (retval != ERROR_OK)
277 break;
278
279 head = head->next;
280 }
281
282 /* halt the target PE */
283 if (retval == ERROR_OK)
284 retval = aarch64_halt(target);
285
286 return retval;
287 }
288
289 static int update_halt_gdb(struct target *target)
290 {
291 int retval = 0;
292 if (target->gdb_service && target->gdb_service->core[0] == -1) {
293 target->gdb_service->target = target;
294 target->gdb_service->core[0] = target->coreid;
295 retval += aarch64_halt_smp(target);
296 }
297 return retval;
298 }
299
/*
 * AArch64 run control
 */
303
/*
 * Poll the core's DSCR and update the cached target state, dispatching
 * halt events (and the debug-entry sequence) when a halt is detected.
 */
static int aarch64_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_aarch64(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR value for aarch64_debug_entry() */
	aarch64->cpudbg_dscr = dscr;

	/* 0x3 in the DSCR run-mode field indicates the core is halted in
	 * debug state */
	if (DSCR_RUN_MODE(dscr) == 0x3) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target %s halted", target_name(target));
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			/* halt out of a debug_execution resume: report a
			 * DEBUG_HALTED event instead of HALTED */
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
369
/*
 * Request a halt of this core via a CTI channel 0 pulse and wait (up to
 * one second) for DSCR to report the halted state.
 */
static int aarch64_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv8_common *armv8 = target_to_armv8(target);

	/*
	 * add HDE in halting debug mode
	 */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_APPPULSE, CTI_CHNL(0));
	if (retval != ERROR_OK)
		return retval;

	/* poll DSCR until the halt takes effect or the 1 s timeout expires */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCRV8_HALT_MASK) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
407
/*
 * Prepare the core for resuming: fix up and write back the PC, restore
 * the system control register and the dirty register context.  Does not
 * actually restart the core (see aarch64_internal_restart()).
 *
 * current:  resume at the current PC when non-zero, else at *address
 * address:  in/out - resume address; updated to the effective PC
 */
static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		/* A32 instructions are word aligned */
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		/* A64 instructions are word aligned (64-bit address) */
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	/* mark the PC dirty so the context restore writes it back */
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
460
/*
 * Restart the core after aarch64_internal_restore() prepared its state.
 * For a slave PE in an SMP group only the CTI gate is opened; the actual
 * restart pulse is issued once by the master (slave_pe == false caller).
 */
static int aarch64_internal_restart(struct target *target, bool slave_pe)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* sanity-check DSCR; these are reported but do not abort the restart */
	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* make sure to acknowledge the halt event before resuming */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_INACK, CTI_TRIG(HALT));

	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs
	 */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	if (!slave_pe) {
		/* trigger an event on channel 1, generates a restart request to the PE */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_APPPULSE, CTI_CHNL(1));
		if (retval != ERROR_OK)
			return retval;

		/* wait up to 1 s for the core to leave debug state.
		 * NOTE(review): the loop tests DSCR_HDE - confirm this is the
		 * intended "restarted" indication rather than e.g. a RESTARTED
		 * status bit */
		long long then = timeval_ms();
		for (;; ) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			if (retval != ERROR_OK)
				return retval;
			if ((dscr & DSCR_HDE) != 0)
				break;
			if (timeval_ms() > then + 1000) {
				LOG_ERROR("Timeout waiting for resume");
				return ERROR_FAIL;
			}
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);
	register_cache_invalidate(arm->core_cache->next);

	return ERROR_OK;
}
530
531 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
532 {
533 int retval = 0;
534 struct target_list *head;
535 struct target *curr;
536 uint64_t address;
537 head = target->head;
538 while (head != (struct target_list *)NULL) {
539 curr = head->target;
540 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
541 /* resume current address , not in step mode */
542 retval += aarch64_internal_restore(curr, 1, &address,
543 handle_breakpoints, 0);
544 retval += aarch64_internal_restart(curr, true);
545 }
546 head = head->next;
547
548 }
549 return retval;
550 }
551
552 static int aarch64_resume(struct target *target, int current,
553 target_addr_t address, int handle_breakpoints, int debug_execution)
554 {
555 int retval = 0;
556 uint64_t addr = address;
557
558 /* dummy resume for smp toggle in order to reduce gdb impact */
559 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
560 /* simulate a start and halt of target */
561 target->gdb_service->target = NULL;
562 target->gdb_service->core[0] = target->gdb_service->core[1];
563 /* fake resume at next poll we play the target core[1], see poll*/
564 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
565 return 0;
566 }
567
568 if (target->state != TARGET_HALTED)
569 return ERROR_TARGET_NOT_HALTED;
570
571 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
572 debug_execution);
573 if (target->smp) {
574 target->gdb_service->core[0] = -1;
575 retval = aarch64_restore_smp(target, handle_breakpoints);
576 if (retval != ERROR_OK)
577 return retval;
578 }
579 aarch64_internal_restart(target, false);
580
581 if (!debug_execution) {
582 target->state = TARGET_RUNNING;
583 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
584 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
585 } else {
586 target->state = TARGET_DEBUG_RUNNING;
587 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
588 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
589 }
590
591 return ERROR_OK;
592 }
593
/*
 * Called when a halt is detected: determine the core state from DSCR,
 * clear sticky errors, work out the debug reason and read the current
 * register set.  aarch64->cpudbg_dscr must hold the DSCR value sampled
 * by aarch64_poll().
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), aarch64->cpudbg_dscr);

	dpm->dscr = aarch64->cpudbg_dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	/* select AArch64 or AArch32 opcode/register access tables */
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);

	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);

	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, aarch64->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		/* WFAR is a 64-bit value split across two 32-bit registers;
		 * read the high half first, then OR in the low half */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR1,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
651
/*
 * Read the system control register (SCTLR) for the current exception
 * level, cache it, and derive the MMU/cache enable flags from it.
 * Also identifies the cache geometry on the first debug entry.
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	/* pick the SCTLR read instruction matching the current exception
	 * level (mirror of aarch64_restore_system_control_reg()) */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* SCTLR_EL1 is not accessible from EL0; switch to EL1h */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
		/* AArch32 modes: read SCTLR through cp15 */
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_INFO("cannot read system control register in this mode");
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* identify the cache and read MPIDR only once (info == -1 marks
	 * "not yet identified") */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	/* SCTLR bit 0: MMU enable, bit 2: data/unified cache enable,
	 * bit 12: instruction cache enable */
	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
		(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
		(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	aarch64->curr_mode = armv8->arm.core_mode;
	return ERROR_OK;
}
716
717 static int aarch64_step(struct target *target, int current, target_addr_t address,
718 int handle_breakpoints)
719 {
720 struct armv8_common *armv8 = target_to_armv8(target);
721 int retval;
722 uint32_t edecr;
723
724 if (target->state != TARGET_HALTED) {
725 LOG_WARNING("target not halted");
726 return ERROR_TARGET_NOT_HALTED;
727 }
728
729 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
730 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
731 if (retval != ERROR_OK)
732 return retval;
733
734 /* make sure EDECR.SS is not set when restoring the register */
735 edecr &= ~0x4;
736
737 /* set EDECR.SS to enter hardware step mode */
738 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
739 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
740 if (retval != ERROR_OK)
741 return retval;
742
743 /* disable interrupts while stepping */
744 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
745 if (retval != ERROR_OK)
746 return ERROR_OK;
747
748 /* resume the target */
749 retval = aarch64_resume(target, current, address, 0, 0);
750 if (retval != ERROR_OK)
751 return retval;
752
753 long long then = timeval_ms();
754 while (target->state != TARGET_HALTED) {
755 retval = aarch64_poll(target);
756 if (retval != ERROR_OK)
757 return retval;
758 if (timeval_ms() > then + 1000) {
759 LOG_ERROR("timeout waiting for target halt");
760 return ERROR_FAIL;
761 }
762 }
763
764 /* restore EDECR */
765 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
766 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
767 if (retval != ERROR_OK)
768 return retval;
769
770 /* restore interrupts */
771 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
772 if (retval != ERROR_OK)
773 return ERROR_OK;
774
775 return ERROR_OK;
776 }
777
778 static int aarch64_restore_context(struct target *target, bool bpwp)
779 {
780 struct armv8_common *armv8 = target_to_armv8(target);
781
782 LOG_DEBUG("%s", target_name(target));
783
784 if (armv8->pre_restore_context)
785 armv8->pre_restore_context(target);
786
787 return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
788 }
789
/*
 * AArch64 breakpoint and watchpoint functions
 */
793
794 /* Setup hardware Breakpoint Register Pair */
795 static int aarch64_set_breakpoint(struct target *target,
796 struct breakpoint *breakpoint, uint8_t matchmode)
797 {
798 int retval;
799 int brp_i = 0;
800 uint32_t control;
801 uint8_t byte_addr_select = 0x0F;
802 struct aarch64_common *aarch64 = target_to_aarch64(target);
803 struct armv8_common *armv8 = &aarch64->armv8_common;
804 struct aarch64_brp *brp_list = aarch64->brp_list;
805
806 if (breakpoint->set) {
807 LOG_WARNING("breakpoint already set");
808 return ERROR_OK;
809 }
810
811 if (breakpoint->type == BKPT_HARD) {
812 int64_t bpt_value;
813 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
814 brp_i++;
815 if (brp_i >= aarch64->brp_num) {
816 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
817 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
818 }
819 breakpoint->set = brp_i + 1;
820 if (breakpoint->length == 2)
821 byte_addr_select = (3 << (breakpoint->address & 0x02));
822 control = ((matchmode & 0x7) << 20)
823 | (1 << 13)
824 | (byte_addr_select << 5)
825 | (3 << 1) | 1;
826 brp_list[brp_i].used = 1;
827 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
828 brp_list[brp_i].control = control;
829 bpt_value = brp_list[brp_i].value;
830
831 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
832 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
833 (uint32_t)(bpt_value & 0xFFFFFFFF));
834 if (retval != ERROR_OK)
835 return retval;
836 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
837 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
838 (uint32_t)(bpt_value >> 32));
839 if (retval != ERROR_OK)
840 return retval;
841
842 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
843 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
844 brp_list[brp_i].control);
845 if (retval != ERROR_OK)
846 return retval;
847 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
848 brp_list[brp_i].control,
849 brp_list[brp_i].value);
850
851 } else if (breakpoint->type == BKPT_SOFT) {
852 uint8_t code[4];
853
854 buf_set_u32(code, 0, 32, armv8_opcode(armv8, ARMV8_OPC_HLT));
855 retval = target_read_memory(target,
856 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
857 breakpoint->length, 1,
858 breakpoint->orig_instr);
859 if (retval != ERROR_OK)
860 return retval;
861
862 armv8_cache_d_inner_flush_virt(armv8,
863 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
864 breakpoint->length);
865
866 retval = target_write_memory(target,
867 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
868 breakpoint->length, 1, code);
869 if (retval != ERROR_OK)
870 return retval;
871
872 armv8_cache_d_inner_flush_virt(armv8,
873 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
874 breakpoint->length);
875
876 armv8_cache_i_inner_inval_virt(armv8,
877 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
878 breakpoint->length);
879
880 breakpoint->set = 0x11; /* Any nice value but 0 */
881 }
882
883 /* Ensure that halting debug mode is enable */
884 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
885 if (retval != ERROR_OK) {
886 LOG_DEBUG("Failed to set DSCR.HDE");
887 return retval;
888 }
889
890 return ERROR_OK;
891 }
892
893 static int aarch64_set_context_breakpoint(struct target *target,
894 struct breakpoint *breakpoint, uint8_t matchmode)
895 {
896 int retval = ERROR_FAIL;
897 int brp_i = 0;
898 uint32_t control;
899 uint8_t byte_addr_select = 0x0F;
900 struct aarch64_common *aarch64 = target_to_aarch64(target);
901 struct armv8_common *armv8 = &aarch64->armv8_common;
902 struct aarch64_brp *brp_list = aarch64->brp_list;
903
904 if (breakpoint->set) {
905 LOG_WARNING("breakpoint already set");
906 return retval;
907 }
908 /*check available context BRPs*/
909 while ((brp_list[brp_i].used ||
910 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
911 brp_i++;
912
913 if (brp_i >= aarch64->brp_num) {
914 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
915 return ERROR_FAIL;
916 }
917
918 breakpoint->set = brp_i + 1;
919 control = ((matchmode & 0x7) << 20)
920 | (1 << 13)
921 | (byte_addr_select << 5)
922 | (3 << 1) | 1;
923 brp_list[brp_i].used = 1;
924 brp_list[brp_i].value = (breakpoint->asid);
925 brp_list[brp_i].control = control;
926 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
927 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
928 brp_list[brp_i].value);
929 if (retval != ERROR_OK)
930 return retval;
931 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
932 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
933 brp_list[brp_i].control);
934 if (retval != ERROR_OK)
935 return retval;
936 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
937 brp_list[brp_i].control,
938 brp_list[brp_i].value);
939 return ERROR_OK;
940
941 }
942
943 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
944 {
945 int retval = ERROR_FAIL;
946 int brp_1 = 0; /* holds the contextID pair */
947 int brp_2 = 0; /* holds the IVA pair */
948 uint32_t control_CTX, control_IVA;
949 uint8_t CTX_byte_addr_select = 0x0F;
950 uint8_t IVA_byte_addr_select = 0x0F;
951 uint8_t CTX_machmode = 0x03;
952 uint8_t IVA_machmode = 0x01;
953 struct aarch64_common *aarch64 = target_to_aarch64(target);
954 struct armv8_common *armv8 = &aarch64->armv8_common;
955 struct aarch64_brp *brp_list = aarch64->brp_list;
956
957 if (breakpoint->set) {
958 LOG_WARNING("breakpoint already set");
959 return retval;
960 }
961 /*check available context BRPs*/
962 while ((brp_list[brp_1].used ||
963 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
964 brp_1++;
965
966 printf("brp(CTX) found num: %d\n", brp_1);
967 if (brp_1 >= aarch64->brp_num) {
968 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
969 return ERROR_FAIL;
970 }
971
972 while ((brp_list[brp_2].used ||
973 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
974 brp_2++;
975
976 printf("brp(IVA) found num: %d\n", brp_2);
977 if (brp_2 >= aarch64->brp_num) {
978 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
979 return ERROR_FAIL;
980 }
981
982 breakpoint->set = brp_1 + 1;
983 breakpoint->linked_BRP = brp_2;
984 control_CTX = ((CTX_machmode & 0x7) << 20)
985 | (brp_2 << 16)
986 | (0 << 14)
987 | (CTX_byte_addr_select << 5)
988 | (3 << 1) | 1;
989 brp_list[brp_1].used = 1;
990 brp_list[brp_1].value = (breakpoint->asid);
991 brp_list[brp_1].control = control_CTX;
992 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
993 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
994 brp_list[brp_1].value);
995 if (retval != ERROR_OK)
996 return retval;
997 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
998 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
999 brp_list[brp_1].control);
1000 if (retval != ERROR_OK)
1001 return retval;
1002
1003 control_IVA = ((IVA_machmode & 0x7) << 20)
1004 | (brp_1 << 16)
1005 | (1 << 13)
1006 | (IVA_byte_addr_select << 5)
1007 | (3 << 1) | 1;
1008 brp_list[brp_2].used = 1;
1009 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1010 brp_list[brp_2].control = control_IVA;
1011 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1012 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1013 brp_list[brp_2].value & 0xFFFFFFFF);
1014 if (retval != ERROR_OK)
1015 return retval;
1016 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1017 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1018 brp_list[brp_2].value >> 32);
1019 if (retval != ERROR_OK)
1020 return retval;
1021 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1022 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1023 brp_list[brp_2].control);
1024 if (retval != ERROR_OK)
1025 return retval;
1026
1027 return ERROR_OK;
1028 }
1029
/*
 * Remove a previously installed breakpoint from the target.
 *
 * BKPT_HARD: clears the breakpoint register pair(s) recorded in
 * breakpoint->set (the BRP index + 1) — and, for hybrid context+IVA
 * breakpoints, the linked pair in breakpoint->linked_BRP — both in the
 * brp_list bookkeeping and in the core's debug registers via the APB-AP.
 * BKPT_SOFT: writes the saved original instruction back over the
 * software breakpoint opcode and performs the necessary cache
 * maintenance so the core refetches the restored instruction.
 *
 * Returns ERROR_OK on success ("not set" and invalid BRP indices are
 * treated as no-ops), or the error from a failed register/memory write.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* both address and asid set => hybrid (context+IVA) breakpoint
		 * using two linked BRPs; clear both of them */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* zero the bookkeeping entry first, then write the (now zero)
			 * control/value back to disable the BRP in hardware */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			/* BVR is 64 bit wide; clear low and high word separately
			 * (value is already 0, so both writes store 0) */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			/* clear the linked IVA breakpoint register pair the same way */
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: a single BRP to clear */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		/* clean the data cache line so the restored opcode reaches memory */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		/* invalidate the instruction cache so the core refetches
		 * the restored instruction */
		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1162
1163 static int aarch64_add_breakpoint(struct target *target,
1164 struct breakpoint *breakpoint)
1165 {
1166 struct aarch64_common *aarch64 = target_to_aarch64(target);
1167
1168 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1169 LOG_INFO("no hardware breakpoint available");
1170 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1171 }
1172
1173 if (breakpoint->type == BKPT_HARD)
1174 aarch64->brp_num_available--;
1175
1176 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1177 }
1178
1179 static int aarch64_add_context_breakpoint(struct target *target,
1180 struct breakpoint *breakpoint)
1181 {
1182 struct aarch64_common *aarch64 = target_to_aarch64(target);
1183
1184 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1185 LOG_INFO("no hardware breakpoint available");
1186 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1187 }
1188
1189 if (breakpoint->type == BKPT_HARD)
1190 aarch64->brp_num_available--;
1191
1192 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1193 }
1194
1195 static int aarch64_add_hybrid_breakpoint(struct target *target,
1196 struct breakpoint *breakpoint)
1197 {
1198 struct aarch64_common *aarch64 = target_to_aarch64(target);
1199
1200 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1201 LOG_INFO("no hardware breakpoint available");
1202 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1203 }
1204
1205 if (breakpoint->type == BKPT_HARD)
1206 aarch64->brp_num_available--;
1207
1208 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1209 }
1210
1211
1212 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1213 {
1214 struct aarch64_common *aarch64 = target_to_aarch64(target);
1215
1216 #if 0
1217 /* It is perfectly possible to remove breakpoints while the target is running */
1218 if (target->state != TARGET_HALTED) {
1219 LOG_WARNING("target not halted");
1220 return ERROR_TARGET_NOT_HALTED;
1221 }
1222 #endif
1223
1224 if (breakpoint->set) {
1225 aarch64_unset_breakpoint(target, breakpoint);
1226 if (breakpoint->type == BKPT_HARD)
1227 aarch64->brp_num_available++;
1228 }
1229
1230 return ERROR_OK;
1231 }
1232
1233 /*
1234 * Cortex-A8 Reset functions
1235 */
1236
1237 static int aarch64_assert_reset(struct target *target)
1238 {
1239 struct armv8_common *armv8 = target_to_armv8(target);
1240
1241 LOG_DEBUG(" ");
1242
1243 /* FIXME when halt is requested, make it work somehow... */
1244
1245 /* Issue some kind of warm reset. */
1246 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1247 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1248 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1249 /* REVISIT handle "pulls" cases, if there's
1250 * hardware that needs them to work.
1251 */
1252 jtag_add_reset(0, 1);
1253 } else {
1254 LOG_ERROR("%s: how to reset?", target_name(target));
1255 return ERROR_FAIL;
1256 }
1257
1258 /* registers are now invalid */
1259 if (target_was_examined(target))
1260 register_cache_invalidate(armv8->arm.core_cache);
1261
1262 target->state = TARGET_RESET;
1263
1264 return ERROR_OK;
1265 }
1266
1267 static int aarch64_deassert_reset(struct target *target)
1268 {
1269 int retval;
1270
1271 LOG_DEBUG(" ");
1272
1273 /* be certain SRST is off */
1274 jtag_add_reset(0, 0);
1275
1276 if (!target_was_examined(target))
1277 return ERROR_OK;
1278
1279 retval = aarch64_poll(target);
1280 if (retval != ERROR_OK)
1281 return retval;
1282
1283 if (target->reset_halt) {
1284 if (target->state != TARGET_HALTED) {
1285 LOG_WARNING("%s: ran after reset and before halt ...",
1286 target_name(target));
1287 retval = target_halt(target);
1288 if (retval != ERROR_OK)
1289 return retval;
1290 }
1291 }
1292
1293 return ERROR_OK;
1294 }
1295
1296 static int aarch64_write_apb_ap_memory(struct target *target,
1297 uint64_t address, uint32_t size,
1298 uint32_t count, const uint8_t *buffer)
1299 {
1300 /* write memory through APB-AP */
1301 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1302 struct armv8_common *armv8 = target_to_armv8(target);
1303 struct arm_dpm *dpm = &armv8->dpm;
1304 struct arm *arm = &armv8->arm;
1305 int total_bytes = count * size;
1306 int total_u32;
1307 int start_byte = address & 0x3;
1308 int end_byte = (address + total_bytes) & 0x3;
1309 struct reg *reg;
1310 uint32_t dscr;
1311 uint8_t *tmp_buff = NULL;
1312
1313 if (target->state != TARGET_HALTED) {
1314 LOG_WARNING("target not halted");
1315 return ERROR_TARGET_NOT_HALTED;
1316 }
1317
1318 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1319
1320 /* Mark register R0 as dirty, as it will be used
1321 * for transferring the data.
1322 * It will be restored automatically when exiting
1323 * debug mode
1324 */
1325 reg = armv8_reg_current(arm, 1);
1326 reg->dirty = true;
1327
1328 reg = armv8_reg_current(arm, 0);
1329 reg->dirty = true;
1330
1331 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1332
1333 /* The algorithm only copies 32 bit words, so the buffer
1334 * should be expanded to include the words at either end.
1335 * The first and last words will be read first to avoid
1336 * corruption if needed.
1337 */
1338 tmp_buff = malloc(total_u32 * 4);
1339
1340 if ((start_byte != 0) && (total_u32 > 1)) {
1341 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1342 * the other bytes in the word.
1343 */
1344 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1345 if (retval != ERROR_OK)
1346 goto error_free_buff_w;
1347 }
1348
1349 /* If end of write is not aligned, or the write is less than 4 bytes */
1350 if ((end_byte != 0) ||
1351 ((total_u32 == 1) && (total_bytes != 4))) {
1352
1353 /* Read the last word to avoid corruption during 32 bit write */
1354 int mem_offset = (total_u32-1) * 4;
1355 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1356 if (retval != ERROR_OK)
1357 goto error_free_buff_w;
1358 }
1359
1360 /* Copy the write buffer over the top of the temporary buffer */
1361 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1362
1363 /* We now have a 32 bit aligned buffer that can be written */
1364
1365 /* Read DSCR */
1366 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1367 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1368 if (retval != ERROR_OK)
1369 goto error_free_buff_w;
1370
1371 /* Set Normal access mode */
1372 dscr = (dscr & ~DSCR_MA);
1373 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1374 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1375
1376 if (arm->core_state == ARM_STATE_AARCH64) {
1377 /* Write X0 with value 'address' using write procedure */
1378 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1379 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1380 retval = dpm->instr_write_data_dcc_64(dpm,
1381 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
1382 } else {
1383 /* Write R0 with value 'address' using write procedure */
1384 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1385 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1386 dpm->instr_write_data_dcc(dpm,
1387 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
1388
1389 }
1390 /* Step 1.d - Change DCC to memory mode */
1391 dscr = dscr | DSCR_MA;
1392 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1393 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1394 if (retval != ERROR_OK)
1395 goto error_unset_dtr_w;
1396
1397
1398 /* Step 2.a - Do the write */
1399 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1400 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1401 if (retval != ERROR_OK)
1402 goto error_unset_dtr_w;
1403
1404 /* Step 3.a - Switch DTR mode back to Normal mode */
1405 dscr = (dscr & ~DSCR_MA);
1406 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1407 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1408 if (retval != ERROR_OK)
1409 goto error_unset_dtr_w;
1410
1411 /* Check for sticky abort flags in the DSCR */
1412 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1413 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1414 if (retval != ERROR_OK)
1415 goto error_free_buff_w;
1416
1417 dpm->dscr = dscr;
1418 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1419 /* Abort occurred - clear it and exit */
1420 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1421 armv8_dpm_handle_exception(dpm);
1422 goto error_free_buff_w;
1423 }
1424
1425 /* Done */
1426 free(tmp_buff);
1427 return ERROR_OK;
1428
1429 error_unset_dtr_w:
1430 /* Unset DTR mode */
1431 mem_ap_read_atomic_u32(armv8->debug_ap,
1432 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1433 dscr = (dscr & ~DSCR_MA);
1434 mem_ap_write_atomic_u32(armv8->debug_ap,
1435 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1436 error_free_buff_w:
1437 LOG_ERROR("error");
1438 free(tmp_buff);
1439 return ERROR_FAIL;
1440 }
1441
/*
 * Read target memory through the APB-AP using the DCC memory-access
 * mode (the "Step x.y" comments follow DDI0487A.g, chapter J9.1).
 *
 * Only 32-bit aligned words can be transferred this way; when either
 * end of the requested range is unaligned, the whole range is read
 * into a temporary word-aligned buffer and the requested bytes are
 * copied out at the end.
 *
 * Returns ERROR_OK on success, ERROR_TARGET_NOT_HALTED when the core
 * is running, or ERROR_FAIL on any debug-register access error (the
 * DTR mode is restored to Normal on the error paths).
 */
static int aarch64_read_apb_ap_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int total_bytes = count * size;
	int total_u32;
	int start_byte = address & 0x3;
	int end_byte = (address + total_bytes) & 0x3;
	struct reg *reg;
	uint32_t dscr;
	uint8_t *tmp_buff = NULL;
	uint8_t *u8buf_ptr;
	uint32_t value;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
	/* Mark register X0, X1 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	reg = armv8_reg_current(arm, 1);
	reg->dirty = true;

	reg = armv8_reg_current(arm, 0);
	reg->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval += mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);

	/* the two branches below perform the same setup sequence, using
	 * the A64 resp. A32 encodings of the transfer instructions;
	 * errors are accumulated in retval and checked once afterwards */
	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval += dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval += dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);

	}
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Optimize the read as much as we can, either way we read in a single pass */
	if ((start_byte) || (end_byte)) {
		/* The algorithm only copies 32 bit words, so the buffer
		 * should be expanded to include the words at either end.
		 * The first and last words will be read into a temp buffer
		 * to avoid corruption
		 */
		tmp_buff = malloc(total_u32 * 4);
		if (!tmp_buff)
			goto error_unset_dtr_r;

		/* use the tmp buffer to read the entire data */
		u8buf_ptr = tmp_buff;
	} else
		/* address and read length are aligned so read directly into the passed buffer */
		u8buf_ptr = buffer;

	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
	 * increments X0 by 4. */
	retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
									armv8->debug_base + CPUV8_DBG_DTRTX);
	if (retval != ERROR_OK)
			goto error_unset_dtr_r;

	/* Step 3.a - set DTR access mode back to Normal mode	*/
	dscr = (dscr & ~DSCR_MA);
	retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm);
		goto error_free_buff_r;
	}

	/* check if we need to copy aligned data by applying any shift necessary */
	if (tmp_buff) {
		memcpy(buffer, tmp_buff + start_byte, total_bytes);
		free(tmp_buff);
	}

	/* Done */
	return ERROR_OK;

error_unset_dtr_r:
	/* Unset DTR mode */
	mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_MA);
	mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
error_free_buff_r:
	LOG_ERROR("error");
	free(tmp_buff);
	return ERROR_FAIL;
}
1602
1603 static int aarch64_read_phys_memory(struct target *target,
1604 target_addr_t address, uint32_t size,
1605 uint32_t count, uint8_t *buffer)
1606 {
1607 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1608
1609 if (count && buffer) {
1610 /* read memory through APB-AP */
1611 retval = aarch64_mmu_modify(target, 0);
1612 if (retval != ERROR_OK)
1613 return retval;
1614 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1615 }
1616 return retval;
1617 }
1618
1619 static int aarch64_read_memory(struct target *target, target_addr_t address,
1620 uint32_t size, uint32_t count, uint8_t *buffer)
1621 {
1622 int mmu_enabled = 0;
1623 int retval;
1624
1625 /* determine if MMU was enabled on target stop */
1626 retval = aarch64_mmu(target, &mmu_enabled);
1627 if (retval != ERROR_OK)
1628 return retval;
1629
1630 if (mmu_enabled) {
1631 /* enable MMU as we could have disabled it for phys access */
1632 retval = aarch64_mmu_modify(target, 1);
1633 if (retval != ERROR_OK)
1634 return retval;
1635 }
1636 return aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1637 }
1638
1639 static int aarch64_write_phys_memory(struct target *target,
1640 target_addr_t address, uint32_t size,
1641 uint32_t count, const uint8_t *buffer)
1642 {
1643 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1644
1645 if (count && buffer) {
1646 /* write memory through APB-AP */
1647 retval = aarch64_mmu_modify(target, 0);
1648 if (retval != ERROR_OK)
1649 return retval;
1650 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1651 }
1652
1653 return retval;
1654 }
1655
1656 static int aarch64_write_memory(struct target *target, target_addr_t address,
1657 uint32_t size, uint32_t count, const uint8_t *buffer)
1658 {
1659 int mmu_enabled = 0;
1660 int retval;
1661
1662 /* determine if MMU was enabled on target stop */
1663 retval = aarch64_mmu(target, &mmu_enabled);
1664 if (retval != ERROR_OK)
1665 return retval;
1666
1667 if (mmu_enabled) {
1668 /* enable MMU as we could have disabled it for phys access */
1669 retval = aarch64_mmu_modify(target, 1);
1670 if (retval != ERROR_OK)
1671 return retval;
1672 }
1673 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1674 }
1675
1676 static int aarch64_handle_target_request(void *priv)
1677 {
1678 struct target *target = priv;
1679 struct armv8_common *armv8 = target_to_armv8(target);
1680 int retval;
1681
1682 if (!target_was_examined(target))
1683 return ERROR_OK;
1684 if (!target->dbg_msg_enabled)
1685 return ERROR_OK;
1686
1687 if (target->state == TARGET_RUNNING) {
1688 uint32_t request;
1689 uint32_t dscr;
1690 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1691 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1692
1693 /* check if we have data */
1694 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
1695 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1696 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
1697 if (retval == ERROR_OK) {
1698 target_request(target, request);
1699 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1700 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1701 }
1702 }
1703 }
1704
1705 return ERROR_OK;
1706 }
1707
1708 static int aarch64_examine_first(struct target *target)
1709 {
1710 struct aarch64_common *aarch64 = target_to_aarch64(target);
1711 struct armv8_common *armv8 = &aarch64->armv8_common;
1712 struct adiv5_dap *swjdp = armv8->arm.dap;
1713 int i;
1714 int retval = ERROR_OK;
1715 uint64_t debug, ttypr;
1716 uint32_t cpuid;
1717 uint32_t tmp0, tmp1;
1718 debug = ttypr = cpuid = 0;
1719
1720 /* We do one extra read to ensure DAP is configured,
1721 * we call ahbap_debugport_init(swjdp) instead
1722 */
1723 retval = dap_dp_init(swjdp);
1724 if (retval != ERROR_OK)
1725 return retval;
1726
1727 /* Search for the APB-AB - it is needed for access to debug registers */
1728 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
1729 if (retval != ERROR_OK) {
1730 LOG_ERROR("Could not find APB-AP for debug access");
1731 return retval;
1732 }
1733
1734 retval = mem_ap_init(armv8->debug_ap);
1735 if (retval != ERROR_OK) {
1736 LOG_ERROR("Could not initialize the APB-AP");
1737 return retval;
1738 }
1739
1740 armv8->debug_ap->memaccess_tck = 80;
1741
1742 if (!target->dbgbase_set) {
1743 uint32_t dbgbase;
1744 /* Get ROM Table base */
1745 uint32_t apid;
1746 int32_t coreidx = target->coreid;
1747 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
1748 if (retval != ERROR_OK)
1749 return retval;
1750 /* Lookup 0x15 -- Processor DAP */
1751 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
1752 &armv8->debug_base, &coreidx);
1753 if (retval != ERROR_OK)
1754 return retval;
1755 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
1756 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
1757 } else
1758 armv8->debug_base = target->dbgbase;
1759
1760 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1761 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
1762 if (retval != ERROR_OK) {
1763 LOG_DEBUG("LOCK debug access fail");
1764 return retval;
1765 }
1766
1767 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1768 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
1769 if (retval != ERROR_OK) {
1770 LOG_DEBUG("Examine %s failed", "oslock");
1771 return retval;
1772 }
1773
1774 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1775 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
1776 if (retval != ERROR_OK) {
1777 LOG_DEBUG("Examine %s failed", "CPUID");
1778 return retval;
1779 }
1780
1781 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1782 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
1783 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1784 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
1785 if (retval != ERROR_OK) {
1786 LOG_DEBUG("Examine %s failed", "Memory Model Type");
1787 return retval;
1788 }
1789 ttypr |= tmp1;
1790 ttypr = (ttypr << 32) | tmp0;
1791
1792 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1793 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
1794 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1795 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
1796 if (retval != ERROR_OK) {
1797 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
1798 return retval;
1799 }
1800 debug |= tmp1;
1801 debug = (debug << 32) | tmp0;
1802
1803 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1804 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
1805 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
1806
1807 if (target->ctibase == 0) {
1808 /* assume a v8 rom table layout */
1809 armv8->cti_base = target->ctibase = armv8->debug_base + 0x10000;
1810 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, target->ctibase);
1811 } else
1812 armv8->cti_base = target->ctibase;
1813
1814 armv8->arm.core_type = ARM_MODE_MON;
1815 retval = aarch64_dpm_setup(aarch64, debug);
1816 if (retval != ERROR_OK)
1817 return retval;
1818
1819 /* Setup Breakpoint Register Pairs */
1820 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
1821 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
1822 aarch64->brp_num_available = aarch64->brp_num;
1823 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
1824 for (i = 0; i < aarch64->brp_num; i++) {
1825 aarch64->brp_list[i].used = 0;
1826 if (i < (aarch64->brp_num-aarch64->brp_num_context))
1827 aarch64->brp_list[i].type = BRP_NORMAL;
1828 else
1829 aarch64->brp_list[i].type = BRP_CONTEXT;
1830 aarch64->brp_list[i].value = 0;
1831 aarch64->brp_list[i].control = 0;
1832 aarch64->brp_list[i].BRPn = i;
1833 }
1834
1835 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
1836
1837 target_set_examined(target);
1838 return ERROR_OK;
1839 }
1840
1841 static int aarch64_examine(struct target *target)
1842 {
1843 int retval = ERROR_OK;
1844
1845 /* don't re-probe hardware after each reset */
1846 if (!target_was_examined(target))
1847 retval = aarch64_examine_first(target);
1848
1849 /* Configure core debug access */
1850 if (retval == ERROR_OK)
1851 retval = aarch64_init_debug_access(target);
1852
1853 return retval;
1854 }
1855
1856 /*
1857 * Cortex-A8 target creation and initialization
1858 */
1859
1860 static int aarch64_init_target(struct command_context *cmd_ctx,
1861 struct target *target)
1862 {
1863 /* examine_first() does a bunch of this */
1864 return ERROR_OK;
1865 }
1866
1867 static int aarch64_init_arch_info(struct target *target,
1868 struct aarch64_common *aarch64, struct jtag_tap *tap)
1869 {
1870 struct armv8_common *armv8 = &aarch64->armv8_common;
1871 struct adiv5_dap *dap = armv8->arm.dap;
1872
1873 armv8->arm.dap = dap;
1874
1875 /* Setup struct aarch64_common */
1876 aarch64->common_magic = AARCH64_COMMON_MAGIC;
1877 /* tap has no dap initialized */
1878 if (!tap->dap) {
1879 tap->dap = dap_init();
1880
1881 /* Leave (only) generic DAP stuff for debugport_init() */
1882 tap->dap->tap = tap;
1883 }
1884
1885 armv8->arm.dap = tap->dap;
1886
1887 aarch64->fast_reg_read = 0;
1888
1889 /* register arch-specific functions */
1890 armv8->examine_debug_reason = NULL;
1891
1892 armv8->post_debug_entry = aarch64_post_debug_entry;
1893
1894 armv8->pre_restore_context = NULL;
1895
1896 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
1897
1898 /* REVISIT v7a setup should be in a v7a-specific routine */
1899 armv8_init_arch_info(target, armv8);
1900 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
1901
1902 return ERROR_OK;
1903 }
1904
1905 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
1906 {
1907 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
1908
1909 return aarch64_init_arch_info(target, aarch64, target->tap);
1910 }
1911
1912 static int aarch64_mmu(struct target *target, int *enabled)
1913 {
1914 if (target->state != TARGET_HALTED) {
1915 LOG_ERROR("%s: target not halted", __func__);
1916 return ERROR_TARGET_INVALID;
1917 }
1918
1919 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
1920 return ERROR_OK;
1921 }
1922
/*
 * Translate a virtual address to a physical one via the armv8 MMU
 * translation helper.
 */
static int aarch64_virt2phys(struct target *target, target_addr_t virt,
	target_addr_t *phys)
{
	/* NOTE(review): the meaning of the trailing '1' flag is not visible
	 * here — confirm against armv8_mmu_translate_va_pa() */
	return armv8_mmu_translate_va_pa(target, virt, phys, 1);
}
1928
1929 COMMAND_HANDLER(aarch64_handle_cache_info_command)
1930 {
1931 struct target *target = get_current_target(CMD_CTX);
1932 struct armv8_common *armv8 = target_to_armv8(target);
1933
1934 return armv8_handle_cache_info_command(CMD_CTX,
1935 &armv8->armv8_mmu.armv8_cache);
1936 }
1937
1938
1939 COMMAND_HANDLER(aarch64_handle_dbginit_command)
1940 {
1941 struct target *target = get_current_target(CMD_CTX);
1942 if (!target_was_examined(target)) {
1943 LOG_ERROR("target not examined yet");
1944 return ERROR_FAIL;
1945 }
1946
1947 return aarch64_init_debug_access(target);
1948 }
1949 COMMAND_HANDLER(aarch64_handle_smp_off_command)
1950 {
1951 struct target *target = get_current_target(CMD_CTX);
1952 /* check target is an smp target */
1953 struct target_list *head;
1954 struct target *curr;
1955 head = target->head;
1956 target->smp = 0;
1957 if (head != (struct target_list *)NULL) {
1958 while (head != (struct target_list *)NULL) {
1959 curr = head->target;
1960 curr->smp = 0;
1961 head = head->next;
1962 }
1963 /* fixes the target display to the debugger */
1964 target->gdb_service->target = target;
1965 }
1966 return ERROR_OK;
1967 }
1968
1969 COMMAND_HANDLER(aarch64_handle_smp_on_command)
1970 {
1971 struct target *target = get_current_target(CMD_CTX);
1972 struct target_list *head;
1973 struct target *curr;
1974 head = target->head;
1975 if (head != (struct target_list *)NULL) {
1976 target->smp = 1;
1977 while (head != (struct target_list *)NULL) {
1978 curr = head->target;
1979 curr->smp = 1;
1980 head = head->next;
1981 }
1982 }
1983 return ERROR_OK;
1984 }
1985
1986 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
1987 {
1988 struct target *target = get_current_target(CMD_CTX);
1989 int retval = ERROR_OK;
1990 struct target_list *head;
1991 head = target->head;
1992 if (head != (struct target_list *)NULL) {
1993 if (CMD_ARGC == 1) {
1994 int coreid = 0;
1995 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
1996 if (ERROR_OK != retval)
1997 return retval;
1998 target->gdb_service->core[1] = coreid;
1999
2000 }
2001 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2002 , target->gdb_service->core[1]);
2003 }
2004 return ERROR_OK;
2005 }
2006
2007 static const struct command_registration aarch64_exec_command_handlers[] = {
2008 {
2009 .name = "cache_info",
2010 .handler = aarch64_handle_cache_info_command,
2011 .mode = COMMAND_EXEC,
2012 .help = "display information about target caches",
2013 .usage = "",
2014 },
2015 {
2016 .name = "dbginit",
2017 .handler = aarch64_handle_dbginit_command,
2018 .mode = COMMAND_EXEC,
2019 .help = "Initialize core debug",
2020 .usage = "",
2021 },
2022 { .name = "smp_off",
2023 .handler = aarch64_handle_smp_off_command,
2024 .mode = COMMAND_EXEC,
2025 .help = "Stop smp handling",
2026 .usage = "",
2027 },
2028 {
2029 .name = "smp_on",
2030 .handler = aarch64_handle_smp_on_command,
2031 .mode = COMMAND_EXEC,
2032 .help = "Restart smp handling",
2033 .usage = "",
2034 },
2035 {
2036 .name = "smp_gdb",
2037 .handler = aarch64_handle_smp_gdb_command,
2038 .mode = COMMAND_EXEC,
2039 .help = "display/fix current core played to gdb",
2040 .usage = "",
2041 },
2042
2043
2044 COMMAND_REGISTRATION_DONE
2045 };
2046 static const struct command_registration aarch64_command_handlers[] = {
2047 {
2048 .chain = armv8_command_handlers,
2049 },
2050 {
2051 .name = "cortex_a",
2052 .mode = COMMAND_ANY,
2053 .help = "Cortex-A command group",
2054 .usage = "",
2055 .chain = aarch64_exec_command_handlers,
2056 },
2057 COMMAND_REGISTRATION_DONE
2058 };
2059
2060 struct target_type aarch64_target = {
2061 .name = "aarch64",
2062
2063 .poll = aarch64_poll,
2064 .arch_state = armv8_arch_state,
2065
2066 .halt = aarch64_halt,
2067 .resume = aarch64_resume,
2068 .step = aarch64_step,
2069
2070 .assert_reset = aarch64_assert_reset,
2071 .deassert_reset = aarch64_deassert_reset,
2072
2073 /* REVISIT allow exporting VFP3 registers ... */
2074 .get_gdb_reg_list = armv8_get_gdb_reg_list,
2075
2076 .read_memory = aarch64_read_memory,
2077 .write_memory = aarch64_write_memory,
2078
2079 .checksum_memory = arm_checksum_memory,
2080 .blank_check_memory = arm_blank_check_memory,
2081
2082 .run_algorithm = armv4_5_run_algorithm,
2083
2084 .add_breakpoint = aarch64_add_breakpoint,
2085 .add_context_breakpoint = aarch64_add_context_breakpoint,
2086 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2087 .remove_breakpoint = aarch64_remove_breakpoint,
2088 .add_watchpoint = NULL,
2089 .remove_watchpoint = NULL,
2090
2091 .commands = aarch64_command_handlers,
2092 .target_create = aarch64_target_create,
2093 .init_target = aarch64_init_target,
2094 .examine = aarch64_examine,
2095
2096 .read_phys_memory = aarch64_read_phys_memory,
2097 .write_phys_memory = aarch64_write_phys_memory,
2098 .mmu = aarch64_mmu,
2099 .virt2phys = aarch64_virt2phys,
2100 };

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)