aarch64: armv8 cache functions update
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
33 static int aarch64_poll(struct target *target);
34 static int aarch64_debug_entry(struct target *target);
35 static int aarch64_restore_context(struct target *target, bool bpwp);
36 static int aarch64_set_breakpoint(struct target *target,
37 struct breakpoint *breakpoint, uint8_t matchmode);
38 static int aarch64_set_context_breakpoint(struct target *target,
39 struct breakpoint *breakpoint, uint8_t matchmode);
40 static int aarch64_set_hybrid_breakpoint(struct target *target,
41 struct breakpoint *breakpoint);
42 static int aarch64_unset_breakpoint(struct target *target,
43 struct breakpoint *breakpoint);
44 static int aarch64_mmu(struct target *target, int *enabled);
45 static int aarch64_virt2phys(struct target *target,
46 target_addr_t virt, target_addr_t *phys);
47 static int aarch64_read_apb_ap_memory(struct target *target,
48 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
49
50 static int aarch64_restore_system_control_reg(struct target *target)
51 {
52 int retval = ERROR_OK;
53
54 struct aarch64_common *aarch64 = target_to_aarch64(target);
55 struct armv8_common *armv8 = target_to_armv8(target);
56
57 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
58 aarch64->system_control_reg_curr = aarch64->system_control_reg;
59 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
60
61 switch (armv8->arm.core_mode) {
62 case ARMV8_64_EL0T:
63 case ARMV8_64_EL1T:
64 case ARMV8_64_EL1H:
65 retval = armv8->arm.msr(target, 3, /*op 0*/
66 0, 1, /* op1, op2 */
67 0, 0, /* CRn, CRm */
68 aarch64->system_control_reg);
69 if (retval != ERROR_OK)
70 return retval;
71 break;
72 case ARMV8_64_EL2T:
73 case ARMV8_64_EL2H:
74 retval = armv8->arm.msr(target, 3, /*op 0*/
75 4, 1, /* op1, op2 */
76 0, 0, /* CRn, CRm */
77 aarch64->system_control_reg);
78 if (retval != ERROR_OK)
79 return retval;
80 break;
81 case ARMV8_64_EL3H:
82 case ARMV8_64_EL3T:
83 retval = armv8->arm.msr(target, 3, /*op 0*/
84 6, 1, /* op1, op2 */
85 0, 0, /* CRn, CRm */
86 aarch64->system_control_reg);
87 if (retval != ERROR_OK)
88 return retval;
89 break;
90 default:
91 retval = armv8->arm.mcr(target, 15, 0, 0, 1, 0, aarch64->system_control_reg);
92 if (retval != ERROR_OK)
93 return retval;
94 break;
95 }
96 }
97 return retval;
98 }
99
100 /* check address before aarch64_apb read write access with mmu on
101 * remove apb predictible data abort */
102 static int aarch64_check_address(struct target *target, uint32_t address)
103 {
104 /* TODO */
105 return ERROR_OK;
106 }
107 /* modify system_control_reg in order to enable or disable mmu for :
108 * - virt2phys address conversion
109 * - read or write memory in phys or virt address */
110 static int aarch64_mmu_modify(struct target *target, int enable)
111 {
112 struct aarch64_common *aarch64 = target_to_aarch64(target);
113 struct armv8_common *armv8 = &aarch64->armv8_common;
114 int retval = ERROR_OK;
115
116 if (enable) {
117 /* if mmu enabled at target stop and mmu not enable */
118 if (!(aarch64->system_control_reg & 0x1U)) {
119 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
120 return ERROR_FAIL;
121 }
122 if (!(aarch64->system_control_reg_curr & 0x1U)) {
123 aarch64->system_control_reg_curr |= 0x1U;
124 switch (armv8->arm.core_mode) {
125 case ARMV8_64_EL0T:
126 case ARMV8_64_EL1T:
127 case ARMV8_64_EL1H:
128 retval = armv8->arm.msr(target, 3, /*op 0*/
129 0, 0, /* op1, op2 */
130 1, 0, /* CRn, CRm */
131 aarch64->system_control_reg_curr);
132 if (retval != ERROR_OK)
133 return retval;
134 break;
135 case ARMV8_64_EL2T:
136 case ARMV8_64_EL2H:
137 retval = armv8->arm.msr(target, 3, /*op 0*/
138 4, 0, /* op1, op2 */
139 1, 0, /* CRn, CRm */
140 aarch64->system_control_reg_curr);
141 if (retval != ERROR_OK)
142 return retval;
143 break;
144 case ARMV8_64_EL3H:
145 case ARMV8_64_EL3T:
146 retval = armv8->arm.msr(target, 3, /*op 0*/
147 6, 0, /* op1, op2 */
148 1, 0, /* CRn, CRm */
149 aarch64->system_control_reg_curr);
150 if (retval != ERROR_OK)
151 return retval;
152 break;
153 default:
154 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
155 }
156 }
157 } else {
158 if (aarch64->system_control_reg_curr & 0x4U) {
159 /* data cache is active */
160 aarch64->system_control_reg_curr &= ~0x4U;
161 /* flush data cache armv7 function to be called */
162 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
163 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
164 }
165 if ((aarch64->system_control_reg_curr & 0x1U)) {
166 aarch64->system_control_reg_curr &= ~0x1U;
167 switch (armv8->arm.core_mode) {
168 case ARMV8_64_EL0T:
169 case ARMV8_64_EL1T:
170 case ARMV8_64_EL1H:
171 retval = armv8->arm.msr(target, 3, /*op 0*/
172 0, 0, /* op1, op2 */
173 1, 0, /* CRn, CRm */
174 aarch64->system_control_reg_curr);
175 if (retval != ERROR_OK)
176 return retval;
177 break;
178 case ARMV8_64_EL2T:
179 case ARMV8_64_EL2H:
180 retval = armv8->arm.msr(target, 3, /*op 0*/
181 4, 0, /* op1, op2 */
182 1, 0, /* CRn, CRm */
183 aarch64->system_control_reg_curr);
184 if (retval != ERROR_OK)
185 return retval;
186 break;
187 case ARMV8_64_EL3H:
188 case ARMV8_64_EL3T:
189 retval = armv8->arm.msr(target, 3, /*op 0*/
190 6, 0, /* op1, op2 */
191 1, 0, /* CRn, CRm */
192 aarch64->system_control_reg_curr);
193 if (retval != ERROR_OK)
194 return retval;
195 break;
196 default:
197 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
198 break;
199 }
200 }
201 }
202 return retval;
203 }
204
205 /*
206 * Basic debug access, very low level assumes state is saved
207 */
208 static int aarch64_init_debug_access(struct target *target)
209 {
210 struct armv8_common *armv8 = target_to_armv8(target);
211 int retval;
212 uint32_t dummy;
213
214 LOG_DEBUG(" ");
215
216 /* Clear Sticky Power Down status Bit in PRSR to enable access to
217 the registers in the Core Power Domain */
218 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
219 armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
220 if (retval != ERROR_OK)
221 return retval;
222
223 /*
224 * Static CTI configuration:
225 * Channel 0 -> trigger outputs HALT request to PE
226 * Channel 1 -> trigger outputs Resume request to PE
227 * Gate all channel trigger events from entering the CTM
228 */
229
230 /* Enable CTI */
231 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
232 armv8->cti_base + CTI_CTR, 1);
233 /* By default, gate all channel triggers to and from the CTM */
234 if (retval == ERROR_OK)
235 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
236 armv8->cti_base + CTI_GATE, 0);
237 /* output halt requests to PE on channel 0 trigger */
238 if (retval == ERROR_OK)
239 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
240 armv8->cti_base + CTI_OUTEN0, CTI_CHNL(0));
241 /* output restart requests to PE on channel 1 trigger */
242 if (retval == ERROR_OK)
243 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
244 armv8->cti_base + CTI_OUTEN1, CTI_CHNL(1));
245 if (retval != ERROR_OK)
246 return retval;
247
248 /* Resync breakpoint registers */
249
250 /* Since this is likely called from init or reset, update target state information*/
251 return aarch64_poll(target);
252 }
253
254 /* Write to memory mapped registers directly with no cache or mmu handling */
255 static int aarch64_dap_write_memap_register_u32(struct target *target,
256 uint32_t address,
257 uint32_t value)
258 {
259 int retval;
260 struct armv8_common *armv8 = target_to_armv8(target);
261
262 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
263
264 return retval;
265 }
266
267 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
268 {
269 struct arm_dpm *dpm = &a8->armv8_common.dpm;
270 int retval;
271
272 dpm->arm = &a8->armv8_common.arm;
273 dpm->didr = debug;
274
275 retval = armv8_dpm_setup(dpm);
276 if (retval == ERROR_OK)
277 retval = armv8_dpm_initialize(dpm);
278
279 return retval;
280 }
281
282 static struct target *get_aarch64(struct target *target, int32_t coreid)
283 {
284 struct target_list *head;
285 struct target *curr;
286
287 head = target->head;
288 while (head != (struct target_list *)NULL) {
289 curr = head->target;
290 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
291 return curr;
292 head = head->next;
293 }
294 return target;
295 }
296 static int aarch64_halt(struct target *target);
297
298 static int aarch64_halt_smp(struct target *target)
299 {
300 int retval = ERROR_OK;
301 struct target_list *head = target->head;
302
303 while (head != (struct target_list *)NULL) {
304 struct target *curr = head->target;
305 struct armv8_common *armv8 = target_to_armv8(curr);
306
307 /* open the gate for channel 0 to let HALT requests pass to the CTM */
308 if (curr->smp)
309 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
310 armv8->cti_base + CTI_GATE, CTI_CHNL(0));
311 if (retval != ERROR_OK)
312 break;
313
314 head = head->next;
315 }
316
317 /* halt the target PE */
318 if (retval == ERROR_OK)
319 retval = aarch64_halt(target);
320
321 return retval;
322 }
323
324 static int update_halt_gdb(struct target *target)
325 {
326 int retval = 0;
327 if (target->gdb_service && target->gdb_service->core[0] == -1) {
328 target->gdb_service->target = target;
329 target->gdb_service->core[0] = target->coreid;
330 retval += aarch64_halt_smp(target);
331 }
332 return retval;
333 }
334
335 /*
336 * Cortex-A8 Run control
337 */
338
/*
 * Poll the core's DSCR and update target->state, firing halt events and
 * performing debug entry when a running core is found halted.  Also
 * implements the SMP gdb core-switch: a dummy halt event is reported
 * when gdb has toggled to another core (see comment below).
 */
static int aarch64_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		/* gdb_service->target was cleared by aarch64_resume's dummy
		 * resume; re-bind it to the requested core and report a halt */
		target->gdb_service->target =
			get_aarch64(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for later inspection (e.g. debug_entry) */
	aarch64->cpudbg_dscr = dscr;

	/* RUN_MODE 0x3: core is halted in debug state */
	if (DSCR_RUN_MODE(dscr) == 0x3) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				/* debug-run halts report a distinct event so gdb is
				 * not notified of internal stops */
				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
404
405 static int aarch64_halt(struct target *target)
406 {
407 int retval = ERROR_OK;
408 uint32_t dscr;
409 struct armv8_common *armv8 = target_to_armv8(target);
410
411 /*
412 * add HDE in halting debug mode
413 */
414 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
415 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
416 if (retval == ERROR_OK)
417 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
418 armv8->debug_base + CPUV8_DBG_DSCR, dscr | DSCR_HDE);
419 if (retval != ERROR_OK)
420 return retval;
421
422 /* trigger an event on channel 0, this outputs a halt request to the PE */
423 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
424 armv8->cti_base + CTI_APPPULSE, CTI_CHNL(0));
425 if (retval != ERROR_OK)
426 return retval;
427
428 long long then = timeval_ms();
429 for (;; ) {
430 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
431 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
432 if (retval != ERROR_OK)
433 return retval;
434 if ((dscr & DSCRV8_HALT_MASK) != 0)
435 break;
436 if (timeval_ms() > then + 1000) {
437 LOG_ERROR("Timeout waiting for halt");
438 return ERROR_FAIL;
439 }
440 }
441
442 target->debug_reason = DBG_REASON_DBGRQ;
443
444 return ERROR_OK;
445 }
446
/*
 * Prepare the core for resuming: fix up and write back the resume PC,
 * restore the system control register and any dirty registers, then
 * invalidate the register cache.  Does NOT actually restart the core;
 * see aarch64_internal_restart() for that.
 *
 * current = 1: resume at the current PC (written back to *address);
 * otherwise resume at *address.
 */
static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		/* A32 instructions are word aligned */
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		/* A64 instructions are word aligned */
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%16" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	/* NOTE(review): dpmv8_modeswitch return value is ignored here —
	 * confirm whether a failed modeswitch should abort the resume */
	dpmv8_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return retval;
}
508
/*
 * Restart a (previously prepared) core via the CTI: acknowledge the
 * pending halt event, open the channel-1 gate, and — unless this is a
 * slave PE of an SMP group — pulse channel 1 and wait for the core to
 * leave debug state.  The CTI register sequence is order-critical.
 */
static int aarch64_internal_restart(struct target *target, bool slave_pe)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* make sure to acknowledge the halt event before resuming */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
		armv8->cti_base + CTI_INACK, CTI_TRIG(HALT));

	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs
	 */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_GATE, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	if (!slave_pe) {
		/* trigger an event on channel 1, generates a restart request to the PE */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_APPPULSE, CTI_CHNL(1));
		if (retval != ERROR_OK)
			return retval;

		/* wait (up to 1 s) for the core to leave debug state */
		long long then = timeval_ms();
		for (;; ) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			if (retval != ERROR_OK)
				return retval;
			if ((dscr & DSCR_HDE) != 0)
				break;
			if (timeval_ms() > then + 1000) {
				LOG_ERROR("Timeout waiting for resume");
				return ERROR_FAIL;
			}
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
575
576 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
577 {
578 int retval = 0;
579 struct target_list *head;
580 struct target *curr;
581 uint64_t address;
582 head = target->head;
583 while (head != (struct target_list *)NULL) {
584 curr = head->target;
585 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
586 /* resume current address , not in step mode */
587 retval += aarch64_internal_restore(curr, 1, &address,
588 handle_breakpoints, 0);
589 retval += aarch64_internal_restart(curr, true);
590 }
591 head = head->next;
592
593 }
594 return retval;
595 }
596
597 static int aarch64_resume(struct target *target, int current,
598 target_addr_t address, int handle_breakpoints, int debug_execution)
599 {
600 int retval = 0;
601 uint64_t addr = address;
602
603 /* dummy resume for smp toggle in order to reduce gdb impact */
604 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
605 /* simulate a start and halt of target */
606 target->gdb_service->target = NULL;
607 target->gdb_service->core[0] = target->gdb_service->core[1];
608 /* fake resume at next poll we play the target core[1], see poll*/
609 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
610 return 0;
611 }
612 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
613 debug_execution);
614 if (target->smp) {
615 target->gdb_service->core[0] = -1;
616 retval = aarch64_restore_smp(target, handle_breakpoints);
617 if (retval != ERROR_OK)
618 return retval;
619 }
620 aarch64_internal_restart(target, false);
621
622 if (!debug_execution) {
623 target->state = TARGET_RUNNING;
624 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
625 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
626 } else {
627 target->state = TARGET_DEBUG_RUNNING;
628 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
629 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
630 }
631
632 return ERROR_OK;
633 }
634
635 static int aarch64_debug_entry(struct target *target)
636 {
637 int retval = ERROR_OK;
638 struct aarch64_common *aarch64 = target_to_aarch64(target);
639 struct armv8_common *armv8 = target_to_armv8(target);
640
641 LOG_DEBUG("dscr = 0x%08" PRIx32, aarch64->cpudbg_dscr);
642
643 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
644 * imprecise data aborts get discarded by issuing a Data
645 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
646 */
647
648 /* make sure to clear all sticky errors */
649 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
650 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
651 if (retval != ERROR_OK)
652 return retval;
653
654 /* Examine debug reason */
655 armv8_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);
656
657 /* save address of instruction that triggered the watchpoint? */
658 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
659 uint32_t tmp;
660 uint64_t wfar = 0;
661
662 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
663 armv8->debug_base + CPUV8_DBG_WFAR1,
664 &tmp);
665 if (retval != ERROR_OK)
666 return retval;
667 wfar = tmp;
668 wfar = (wfar << 32);
669 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
670 armv8->debug_base + CPUV8_DBG_WFAR0,
671 &tmp);
672 if (retval != ERROR_OK)
673 return retval;
674 wfar |= tmp;
675 armv8_dpm_report_wfar(&armv8->dpm, wfar);
676 }
677
678 retval = armv8_dpm_read_current_registers(&armv8->dpm);
679
680 if (armv8->post_debug_entry) {
681 retval = armv8->post_debug_entry(target);
682 if (retval != ERROR_OK)
683 return retval;
684 }
685
686 return retval;
687 }
688
689 static int aarch64_post_debug_entry(struct target *target)
690 {
691 struct aarch64_common *aarch64 = target_to_aarch64(target);
692 struct armv8_common *armv8 = &aarch64->armv8_common;
693 int retval;
694
695 /* clear sticky errors */
696 mem_ap_write_atomic_u32(armv8->debug_ap,
697 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
698
699 switch (armv8->arm.core_mode) {
700 case ARMV8_64_EL0T:
701 case ARMV8_64_EL1T:
702 case ARMV8_64_EL1H:
703 retval = armv8->arm.mrs(target, 3, /*op 0*/
704 0, 0, /* op1, op2 */
705 1, 0, /* CRn, CRm */
706 &aarch64->system_control_reg);
707 if (retval != ERROR_OK)
708 return retval;
709 break;
710 case ARMV8_64_EL2T:
711 case ARMV8_64_EL2H:
712 retval = armv8->arm.mrs(target, 3, /*op 0*/
713 4, 0, /* op1, op2 */
714 1, 0, /* CRn, CRm */
715 &aarch64->system_control_reg);
716 if (retval != ERROR_OK)
717 return retval;
718 break;
719 case ARMV8_64_EL3H:
720 case ARMV8_64_EL3T:
721 retval = armv8->arm.mrs(target, 3, /*op 0*/
722 6, 0, /* op1, op2 */
723 1, 0, /* CRn, CRm */
724 &aarch64->system_control_reg);
725 if (retval != ERROR_OK)
726 return retval;
727 break;
728 default:
729 retval = armv8->arm.mrc(target, 15, 0, 0, 1, 0, &aarch64->system_control_reg);
730 if (retval != ERROR_OK)
731 return retval;
732 break;
733 }
734
735 LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
736 aarch64->system_control_reg_curr = aarch64->system_control_reg;
737
738 if (armv8->armv8_mmu.armv8_cache.info == -1) {
739 armv8_identify_cache(armv8);
740 armv8_read_mpidr(armv8);
741 }
742
743 armv8->armv8_mmu.mmu_enabled =
744 (aarch64->system_control_reg & 0x1U) ? 1 : 0;
745 armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
746 (aarch64->system_control_reg & 0x4U) ? 1 : 0;
747 armv8->armv8_mmu.armv8_cache.i_cache_enabled =
748 (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
749 aarch64->curr_mode = armv8->arm.core_mode;
750 return ERROR_OK;
751 }
752
753 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
754 {
755 struct armv8_common *armv8 = target_to_armv8(target);
756 uint32_t dscr;
757
758 /* Read DSCR */
759 int retval = mem_ap_read_atomic_u32(armv8->debug_ap,
760 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
761 if (ERROR_OK != retval)
762 return retval;
763
764 /* clear bitfield */
765 dscr &= ~bit_mask;
766 /* put new value */
767 dscr |= value & bit_mask;
768
769 /* write new DSCR */
770 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
771 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
772 return retval;
773 }
774
775 static int aarch64_step(struct target *target, int current, target_addr_t address,
776 int handle_breakpoints)
777 {
778 struct armv8_common *armv8 = target_to_armv8(target);
779 int retval;
780 uint32_t edecr;
781
782 if (target->state != TARGET_HALTED) {
783 LOG_WARNING("target not halted");
784 return ERROR_TARGET_NOT_HALTED;
785 }
786
787 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
788 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
789 if (retval != ERROR_OK)
790 return retval;
791
792 /* make sure EDECR.SS is not set when restoring the register */
793 edecr &= ~0x4;
794
795 /* set EDECR.SS to enter hardware step mode */
796 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
797 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
798 if (retval != ERROR_OK)
799 return retval;
800
801 /* disable interrupts while stepping */
802 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
803 if (retval != ERROR_OK)
804 return ERROR_OK;
805
806 /* resume the target */
807 retval = aarch64_resume(target, current, address, 0, 0);
808 if (retval != ERROR_OK)
809 return retval;
810
811 long long then = timeval_ms();
812 while (target->state != TARGET_HALTED) {
813 retval = aarch64_poll(target);
814 if (retval != ERROR_OK)
815 return retval;
816 if (timeval_ms() > then + 1000) {
817 LOG_ERROR("timeout waiting for target halt");
818 return ERROR_FAIL;
819 }
820 }
821
822 /* restore EDECR */
823 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
824 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
825 if (retval != ERROR_OK)
826 return retval;
827
828 /* restore interrupts */
829 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
830 if (retval != ERROR_OK)
831 return ERROR_OK;
832
833 return ERROR_OK;
834 }
835
836 static int aarch64_restore_context(struct target *target, bool bpwp)
837 {
838 struct armv8_common *armv8 = target_to_armv8(target);
839
840 LOG_DEBUG(" ");
841
842 if (armv8->pre_restore_context)
843 armv8->pre_restore_context(target);
844
845 return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
846
847 }
848
849 /*
850 * Cortex-A8 Breakpoint and watchpoint functions
851 */
852
853 /* Setup hardware Breakpoint Register Pair */
854 static int aarch64_set_breakpoint(struct target *target,
855 struct breakpoint *breakpoint, uint8_t matchmode)
856 {
857 int retval;
858 int brp_i = 0;
859 uint32_t control;
860 uint8_t byte_addr_select = 0x0F;
861 struct aarch64_common *aarch64 = target_to_aarch64(target);
862 struct armv8_common *armv8 = &aarch64->armv8_common;
863 struct aarch64_brp *brp_list = aarch64->brp_list;
864 uint32_t dscr;
865
866 if (breakpoint->set) {
867 LOG_WARNING("breakpoint already set");
868 return ERROR_OK;
869 }
870
871 if (breakpoint->type == BKPT_HARD) {
872 int64_t bpt_value;
873 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
874 brp_i++;
875 if (brp_i >= aarch64->brp_num) {
876 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
877 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
878 }
879 breakpoint->set = brp_i + 1;
880 if (breakpoint->length == 2)
881 byte_addr_select = (3 << (breakpoint->address & 0x02));
882 control = ((matchmode & 0x7) << 20)
883 | (1 << 13)
884 | (byte_addr_select << 5)
885 | (3 << 1) | 1;
886 brp_list[brp_i].used = 1;
887 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
888 brp_list[brp_i].control = control;
889 bpt_value = brp_list[brp_i].value;
890
891 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
892 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
893 (uint32_t)(bpt_value & 0xFFFFFFFF));
894 if (retval != ERROR_OK)
895 return retval;
896 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
897 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
898 (uint32_t)(bpt_value >> 32));
899 if (retval != ERROR_OK)
900 return retval;
901
902 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
903 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
904 brp_list[brp_i].control);
905 if (retval != ERROR_OK)
906 return retval;
907 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
908 brp_list[brp_i].control,
909 brp_list[brp_i].value);
910
911 } else if (breakpoint->type == BKPT_SOFT) {
912 uint8_t code[4];
913
914 buf_set_u32(code, 0, 32, ARMV8_HLT(0x11));
915 retval = target_read_memory(target,
916 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
917 breakpoint->length, 1,
918 breakpoint->orig_instr);
919 if (retval != ERROR_OK)
920 return retval;
921
922 armv8_cache_d_inner_flush_virt(armv8,
923 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
924 breakpoint->length);
925
926 retval = target_write_memory(target,
927 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
928 breakpoint->length, 1, code);
929 if (retval != ERROR_OK)
930 return retval;
931
932 armv8_cache_d_inner_flush_virt(armv8,
933 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
934 breakpoint->length);
935
936 armv8_cache_i_inner_inval_virt(armv8,
937 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
938 breakpoint->length);
939
940 breakpoint->set = 0x11; /* Any nice value but 0 */
941 }
942
943 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
944 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
945 /* Ensure that halting debug mode is enable */
946 dscr = dscr | DSCR_HDE;
947 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
948 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
949 if (retval != ERROR_OK) {
950 LOG_DEBUG("Failed to set DSCR.HDE");
951 return retval;
952 }
953
954 return ERROR_OK;
955 }
956
957 static int aarch64_set_context_breakpoint(struct target *target,
958 struct breakpoint *breakpoint, uint8_t matchmode)
959 {
960 int retval = ERROR_FAIL;
961 int brp_i = 0;
962 uint32_t control;
963 uint8_t byte_addr_select = 0x0F;
964 struct aarch64_common *aarch64 = target_to_aarch64(target);
965 struct armv8_common *armv8 = &aarch64->armv8_common;
966 struct aarch64_brp *brp_list = aarch64->brp_list;
967
968 if (breakpoint->set) {
969 LOG_WARNING("breakpoint already set");
970 return retval;
971 }
972 /*check available context BRPs*/
973 while ((brp_list[brp_i].used ||
974 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
975 brp_i++;
976
977 if (brp_i >= aarch64->brp_num) {
978 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
979 return ERROR_FAIL;
980 }
981
982 breakpoint->set = brp_i + 1;
983 control = ((matchmode & 0x7) << 20)
984 | (1 << 13)
985 | (byte_addr_select << 5)
986 | (3 << 1) | 1;
987 brp_list[brp_i].used = 1;
988 brp_list[brp_i].value = (breakpoint->asid);
989 brp_list[brp_i].control = control;
990 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
991 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
992 brp_list[brp_i].value);
993 if (retval != ERROR_OK)
994 return retval;
995 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
996 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
997 brp_list[brp_i].control);
998 if (retval != ERROR_OK)
999 return retval;
1000 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1001 brp_list[brp_i].control,
1002 brp_list[brp_i].value);
1003 return ERROR_OK;
1004
1005 }
1006
1007 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1008 {
1009 int retval = ERROR_FAIL;
1010 int brp_1 = 0; /* holds the contextID pair */
1011 int brp_2 = 0; /* holds the IVA pair */
1012 uint32_t control_CTX, control_IVA;
1013 uint8_t CTX_byte_addr_select = 0x0F;
1014 uint8_t IVA_byte_addr_select = 0x0F;
1015 uint8_t CTX_machmode = 0x03;
1016 uint8_t IVA_machmode = 0x01;
1017 struct aarch64_common *aarch64 = target_to_aarch64(target);
1018 struct armv8_common *armv8 = &aarch64->armv8_common;
1019 struct aarch64_brp *brp_list = aarch64->brp_list;
1020
1021 if (breakpoint->set) {
1022 LOG_WARNING("breakpoint already set");
1023 return retval;
1024 }
1025 /*check available context BRPs*/
1026 while ((brp_list[brp_1].used ||
1027 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1028 brp_1++;
1029
1030 printf("brp(CTX) found num: %d\n", brp_1);
1031 if (brp_1 >= aarch64->brp_num) {
1032 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1033 return ERROR_FAIL;
1034 }
1035
1036 while ((brp_list[brp_2].used ||
1037 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1038 brp_2++;
1039
1040 printf("brp(IVA) found num: %d\n", brp_2);
1041 if (brp_2 >= aarch64->brp_num) {
1042 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1043 return ERROR_FAIL;
1044 }
1045
1046 breakpoint->set = brp_1 + 1;
1047 breakpoint->linked_BRP = brp_2;
1048 control_CTX = ((CTX_machmode & 0x7) << 20)
1049 | (brp_2 << 16)
1050 | (0 << 14)
1051 | (CTX_byte_addr_select << 5)
1052 | (3 << 1) | 1;
1053 brp_list[brp_1].used = 1;
1054 brp_list[brp_1].value = (breakpoint->asid);
1055 brp_list[brp_1].control = control_CTX;
1056 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1057 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1058 brp_list[brp_1].value);
1059 if (retval != ERROR_OK)
1060 return retval;
1061 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1062 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1063 brp_list[brp_1].control);
1064 if (retval != ERROR_OK)
1065 return retval;
1066
1067 control_IVA = ((IVA_machmode & 0x7) << 20)
1068 | (brp_1 << 16)
1069 | (1 << 13)
1070 | (IVA_byte_addr_select << 5)
1071 | (3 << 1) | 1;
1072 brp_list[brp_2].used = 1;
1073 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1074 brp_list[brp_2].control = control_IVA;
1075 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1076 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1077 brp_list[brp_2].value & 0xFFFFFFFF);
1078 if (retval != ERROR_OK)
1079 return retval;
1080 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1081 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1082 brp_list[brp_2].value >> 32);
1083 if (retval != ERROR_OK)
1084 return retval;
1085 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1086 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1087 brp_list[brp_2].control);
1088 if (retval != ERROR_OK)
1089 return retval;
1090
1091 return ERROR_OK;
1092 }
1093
/*
 * Remove a previously programmed breakpoint from the target.
 *
 * BKPT_HARD: the Breakpoint Register Pair(s) are disabled (BCR cleared
 * first, then BVR low/high words zeroed) and released in the local
 * bookkeeping.  A hybrid breakpoint (address != 0 && asid != 0) owns two
 * pairs: the context pair recorded as breakpoint->set - 1 and the linked
 * IVA pair in breakpoint->linked_BRP.
 *
 * BKPT_SOFT: the saved original instruction is written back over the
 * software breakpoint opcode, with d-cache clean and i-cache invalidate
 * around the write so the core fetches the restored instruction.
 *
 * Returns ERROR_OK on success (including the no-op "not set" case) or the
 * error from the debug-register/memory writes.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			/* hybrid breakpoint: release the context pair first,
			 * then the linked IVA pair */
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* clear BCR first so the pair is disabled before BVR changes */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			/* value was zeroed above, so both BVR words are written as 0 */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain address (or context-only) breakpoint: one pair */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		/* clean d-cache so the write lands in memory, not a dirty line */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* flush the restored instruction and drop any stale i-cache line */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1226
1227 static int aarch64_add_breakpoint(struct target *target,
1228 struct breakpoint *breakpoint)
1229 {
1230 struct aarch64_common *aarch64 = target_to_aarch64(target);
1231
1232 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1233 LOG_INFO("no hardware breakpoint available");
1234 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1235 }
1236
1237 if (breakpoint->type == BKPT_HARD)
1238 aarch64->brp_num_available--;
1239
1240 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1241 }
1242
1243 static int aarch64_add_context_breakpoint(struct target *target,
1244 struct breakpoint *breakpoint)
1245 {
1246 struct aarch64_common *aarch64 = target_to_aarch64(target);
1247
1248 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1249 LOG_INFO("no hardware breakpoint available");
1250 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1251 }
1252
1253 if (breakpoint->type == BKPT_HARD)
1254 aarch64->brp_num_available--;
1255
1256 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1257 }
1258
1259 static int aarch64_add_hybrid_breakpoint(struct target *target,
1260 struct breakpoint *breakpoint)
1261 {
1262 struct aarch64_common *aarch64 = target_to_aarch64(target);
1263
1264 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1265 LOG_INFO("no hardware breakpoint available");
1266 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1267 }
1268
1269 if (breakpoint->type == BKPT_HARD)
1270 aarch64->brp_num_available--;
1271
1272 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1273 }
1274
1275
1276 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1277 {
1278 struct aarch64_common *aarch64 = target_to_aarch64(target);
1279
1280 #if 0
1281 /* It is perfectly possible to remove breakpoints while the target is running */
1282 if (target->state != TARGET_HALTED) {
1283 LOG_WARNING("target not halted");
1284 return ERROR_TARGET_NOT_HALTED;
1285 }
1286 #endif
1287
1288 if (breakpoint->set) {
1289 aarch64_unset_breakpoint(target, breakpoint);
1290 if (breakpoint->type == BKPT_HARD)
1291 aarch64->brp_num_available++;
1292 }
1293
1294 return ERROR_OK;
1295 }
1296
1297 /*
1298 * Cortex-A8 Reset functions
1299 */
1300
1301 static int aarch64_assert_reset(struct target *target)
1302 {
1303 struct armv8_common *armv8 = target_to_armv8(target);
1304
1305 LOG_DEBUG(" ");
1306
1307 /* FIXME when halt is requested, make it work somehow... */
1308
1309 /* Issue some kind of warm reset. */
1310 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1311 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1312 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1313 /* REVISIT handle "pulls" cases, if there's
1314 * hardware that needs them to work.
1315 */
1316 jtag_add_reset(0, 1);
1317 } else {
1318 LOG_ERROR("%s: how to reset?", target_name(target));
1319 return ERROR_FAIL;
1320 }
1321
1322 /* registers are now invalid */
1323 register_cache_invalidate(armv8->arm.core_cache);
1324
1325 target->state = TARGET_RESET;
1326
1327 return ERROR_OK;
1328 }
1329
1330 static int aarch64_deassert_reset(struct target *target)
1331 {
1332 int retval;
1333
1334 LOG_DEBUG(" ");
1335
1336 /* be certain SRST is off */
1337 jtag_add_reset(0, 0);
1338
1339 retval = aarch64_poll(target);
1340 if (retval != ERROR_OK)
1341 return retval;
1342
1343 if (target->reset_halt) {
1344 if (target->state != TARGET_HALTED) {
1345 LOG_WARNING("%s: ran after reset and before halt ...",
1346 target_name(target));
1347 retval = target_halt(target);
1348 if (retval != ERROR_OK)
1349 return retval;
1350 }
1351 }
1352
1353 return ERROR_OK;
1354 }
1355
1356 static int aarch64_write_apb_ap_memory(struct target *target,
1357 uint64_t address, uint32_t size,
1358 uint32_t count, const uint8_t *buffer)
1359 {
1360 /* write memory through APB-AP */
1361 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1362 struct armv8_common *armv8 = target_to_armv8(target);
1363 struct arm_dpm *dpm = &armv8->dpm;
1364 struct arm *arm = &armv8->arm;
1365 int total_bytes = count * size;
1366 int total_u32;
1367 int start_byte = address & 0x3;
1368 int end_byte = (address + total_bytes) & 0x3;
1369 struct reg *reg;
1370 uint32_t dscr;
1371 uint8_t *tmp_buff = NULL;
1372
1373 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count%" PRIu32,
1374 address, size, count);
1375 if (target->state != TARGET_HALTED) {
1376 LOG_WARNING("target not halted");
1377 return ERROR_TARGET_NOT_HALTED;
1378 }
1379
1380 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1381
1382 /* Mark register R0 as dirty, as it will be used
1383 * for transferring the data.
1384 * It will be restored automatically when exiting
1385 * debug mode
1386 */
1387 reg = armv8_reg_current(arm, 1);
1388 reg->dirty = true;
1389
1390 reg = armv8_reg_current(arm, 0);
1391 reg->dirty = true;
1392
1393 /* clear any abort */
1394 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1395 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1396 if (retval != ERROR_OK)
1397 return retval;
1398
1399
1400 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1401
1402 /* The algorithm only copies 32 bit words, so the buffer
1403 * should be expanded to include the words at either end.
1404 * The first and last words will be read first to avoid
1405 * corruption if needed.
1406 */
1407 tmp_buff = malloc(total_u32 * 4);
1408
1409 if ((start_byte != 0) && (total_u32 > 1)) {
1410 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1411 * the other bytes in the word.
1412 */
1413 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1414 if (retval != ERROR_OK)
1415 goto error_free_buff_w;
1416 }
1417
1418 /* If end of write is not aligned, or the write is less than 4 bytes */
1419 if ((end_byte != 0) ||
1420 ((total_u32 == 1) && (total_bytes != 4))) {
1421
1422 /* Read the last word to avoid corruption during 32 bit write */
1423 int mem_offset = (total_u32-1) * 4;
1424 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1425 if (retval != ERROR_OK)
1426 goto error_free_buff_w;
1427 }
1428
1429 /* Copy the write buffer over the top of the temporary buffer */
1430 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1431
1432 /* We now have a 32 bit aligned buffer that can be written */
1433
1434 /* Read DSCR */
1435 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1436 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1437 if (retval != ERROR_OK)
1438 goto error_free_buff_w;
1439
1440 /* Set Normal access mode */
1441 dscr = (dscr & ~DSCR_MA);
1442 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1443 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1444
1445 if (arm->core_state == ARM_STATE_AARCH64) {
1446 /* Write X0 with value 'address' using write procedure */
1447 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1448 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1449 retval = dpm->instr_write_data_dcc_64(dpm,
1450 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
1451 } else {
1452 /* Write R0 with value 'address' using write procedure */
1453 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1454 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1455 dpm->instr_write_data_dcc(dpm,
1456 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), address & ~0x3ULL);
1457
1458 }
1459 /* Step 1.d - Change DCC to memory mode */
1460 dscr = dscr | DSCR_MA;
1461 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1462 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1463 if (retval != ERROR_OK)
1464 goto error_unset_dtr_w;
1465
1466
1467 /* Step 2.a - Do the write */
1468 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1469 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1470 if (retval != ERROR_OK)
1471 goto error_unset_dtr_w;
1472
1473 /* Step 3.a - Switch DTR mode back to Normal mode */
1474 dscr = (dscr & ~DSCR_MA);
1475 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1476 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1477 if (retval != ERROR_OK)
1478 goto error_unset_dtr_w;
1479
1480 /* Check for sticky abort flags in the DSCR */
1481 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1482 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1483 if (retval != ERROR_OK)
1484 goto error_free_buff_w;
1485 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1486 /* Abort occurred - clear it and exit */
1487 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1488 mem_ap_write_atomic_u32(armv8->debug_ap,
1489 armv8->debug_base + CPUV8_DBG_DRCR, 1<<2);
1490 goto error_free_buff_w;
1491 }
1492
1493 /* Done */
1494 free(tmp_buff);
1495 return ERROR_OK;
1496
1497 error_unset_dtr_w:
1498 /* Unset DTR mode */
1499 mem_ap_read_atomic_u32(armv8->debug_ap,
1500 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1501 dscr = (dscr & ~DSCR_MA);
1502 mem_ap_write_atomic_u32(armv8->debug_ap,
1503 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1504 error_free_buff_w:
1505 LOG_ERROR("error");
1506 free(tmp_buff);
1507 return ERROR_FAIL;
1508 }
1509
/*
 * Read target memory through the APB-AP using DCC memory-access mode.
 *
 * Implements the read algorithm from ARM DDI0487A.g, chapter J9.1: with
 * EDSCR.MA set, each read of DBGDTRTX causes the core to load from [X0]
 * and post-increment X0 by 4, so the transfer works in whole 32-bit
 * words.  Unaligned requests are read into a temporary buffer and the
 * requested byte range is copied out at the end.  X0/X1 are marked dirty
 * and restored automatically on debug exit.
 *
 * Returns ERROR_OK on success, ERROR_TARGET_NOT_HALTED when the core is
 * running, or ERROR_FAIL on DAP errors / sticky aborts.
 */
static int aarch64_read_apb_ap_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int total_bytes = count * size;
	int total_u32;
	int start_byte = address & 0x3;
	int end_byte = (address + total_bytes) & 0x3;
	struct reg *reg;
	uint32_t dscr;
	uint8_t *tmp_buff = NULL;
	uint8_t *u8buf_ptr;
	uint32_t value;

	LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count%" PRIu32,
			address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
	/* Mark register X0, X1 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	reg = armv8_reg_current(arm, 1);
	reg->dirty = true;

	reg = armv8_reg_current(arm, 0);
	reg->dirty = true;

	/* clear any abort */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	/* NOTE(review): errors from here to step 1.f are accumulated by
	 * summing retvals and only checked once below */
	dscr = (dscr & ~DSCR_MA);
	retval += mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval += dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval += dpm->instr_write_data_dcc(dpm,
				T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, T32_FMTITR(ARMV4_5_MCR(14, 0, 0, 0, 5, 0)));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);

	}
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Optimize the read as much as we can, either way we read in a single pass */
	if ((start_byte) || (end_byte)) {
		/* The algorithm only copies 32 bit words, so the buffer
		 * should be expanded to include the words at either end.
		 * The first and last words will be read into a temp buffer
		 * to avoid corruption
		 */
		tmp_buff = malloc(total_u32 * 4);
		if (!tmp_buff)
			goto error_unset_dtr_r;

		/* use the tmp buffer to read the entire data */
		u8buf_ptr = tmp_buff;
	} else
		/* address and read length are aligned so read directly into the passed buffer */
		u8buf_ptr = buffer;

	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
	 * increments X0 by 4. */
	retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
									armv8->debug_base + CPUV8_DBG_DTRTX);
	if (retval != ERROR_OK)
			goto error_unset_dtr_r;

	/* Step 3.a - set DTR access mode back to Normal mode	*/
	dscr = (dscr & ~DSCR_MA);
	retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
		goto error_free_buff_r;
	}

	/* check if we need to copy aligned data by applying any shift necessary */
	if (tmp_buff) {
		memcpy(buffer, tmp_buff + start_byte, total_bytes);
		free(tmp_buff);
	}

	/* Done */
	return ERROR_OK;

error_unset_dtr_r:
	/* Unset DTR mode */
	mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_MA);
	mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
error_free_buff_r:
	LOG_ERROR("error");
	free(tmp_buff);
	return ERROR_FAIL;
}
1676
1677 static int aarch64_read_phys_memory(struct target *target,
1678 target_addr_t address, uint32_t size,
1679 uint32_t count, uint8_t *buffer)
1680 {
1681 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1682 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
1683 address, size, count);
1684
1685 if (count && buffer) {
1686 /* read memory through APB-AP */
1687 retval = aarch64_mmu_modify(target, 0);
1688 if (retval != ERROR_OK)
1689 return retval;
1690 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1691 }
1692 return retval;
1693 }
1694
1695 static int aarch64_read_memory(struct target *target, target_addr_t address,
1696 uint32_t size, uint32_t count, uint8_t *buffer)
1697 {
1698 int mmu_enabled = 0;
1699 int retval;
1700
1701 /* aarch64 handles unaligned memory access */
1702 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
1703 size, count);
1704
1705 /* determine if MMU was enabled on target stop */
1706 retval = aarch64_mmu(target, &mmu_enabled);
1707 if (retval != ERROR_OK)
1708 return retval;
1709
1710 if (mmu_enabled) {
1711 retval = aarch64_check_address(target, address);
1712 if (retval != ERROR_OK)
1713 return retval;
1714 /* enable MMU as we could have disabled it for phys access */
1715 retval = aarch64_mmu_modify(target, 1);
1716 if (retval != ERROR_OK)
1717 return retval;
1718 }
1719 return aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1720 }
1721
1722 static int aarch64_write_phys_memory(struct target *target,
1723 target_addr_t address, uint32_t size,
1724 uint32_t count, const uint8_t *buffer)
1725 {
1726 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1727
1728 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
1729 size, count);
1730
1731 if (count && buffer) {
1732 /* write memory through APB-AP */
1733 retval = aarch64_mmu_modify(target, 0);
1734 if (retval != ERROR_OK)
1735 return retval;
1736 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1737 }
1738
1739 return retval;
1740 }
1741
1742 static int aarch64_write_memory(struct target *target, target_addr_t address,
1743 uint32_t size, uint32_t count, const uint8_t *buffer)
1744 {
1745 int mmu_enabled = 0;
1746 int retval;
1747
1748 /* aarch64 handles unaligned memory access */
1749 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
1750 "; count %" PRId32, address, size, count);
1751
1752 /* determine if MMU was enabled on target stop */
1753 retval = aarch64_mmu(target, &mmu_enabled);
1754 if (retval != ERROR_OK)
1755 return retval;
1756
1757 if (mmu_enabled) {
1758 retval = aarch64_check_address(target, address);
1759 if (retval != ERROR_OK)
1760 return retval;
1761 /* enable MMU as we could have disabled it for phys access */
1762 retval = aarch64_mmu_modify(target, 1);
1763 if (retval != ERROR_OK)
1764 return retval;
1765 }
1766 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1767 }
1768
1769 static int aarch64_handle_target_request(void *priv)
1770 {
1771 struct target *target = priv;
1772 struct armv8_common *armv8 = target_to_armv8(target);
1773 int retval;
1774
1775 if (!target_was_examined(target))
1776 return ERROR_OK;
1777 if (!target->dbg_msg_enabled)
1778 return ERROR_OK;
1779
1780 if (target->state == TARGET_RUNNING) {
1781 uint32_t request;
1782 uint32_t dscr;
1783 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1784 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1785
1786 /* check if we have data */
1787 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
1788 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1789 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
1790 if (retval == ERROR_OK) {
1791 target_request(target, request);
1792 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1793 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1794 }
1795 }
1796 }
1797
1798 return ERROR_OK;
1799 }
1800
1801 static int aarch64_examine_first(struct target *target)
1802 {
1803 struct aarch64_common *aarch64 = target_to_aarch64(target);
1804 struct armv8_common *armv8 = &aarch64->armv8_common;
1805 struct adiv5_dap *swjdp = armv8->arm.dap;
1806 int i;
1807 int retval = ERROR_OK;
1808 uint64_t debug, ttypr;
1809 uint32_t cpuid;
1810 uint32_t tmp0, tmp1;
1811 debug = ttypr = cpuid = 0;
1812
1813 /* We do one extra read to ensure DAP is configured,
1814 * we call ahbap_debugport_init(swjdp) instead
1815 */
1816 retval = dap_dp_init(swjdp);
1817 if (retval != ERROR_OK)
1818 return retval;
1819
1820 /* Search for the APB-AB - it is needed for access to debug registers */
1821 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
1822 if (retval != ERROR_OK) {
1823 LOG_ERROR("Could not find APB-AP for debug access");
1824 return retval;
1825 }
1826
1827 retval = mem_ap_init(armv8->debug_ap);
1828 if (retval != ERROR_OK) {
1829 LOG_ERROR("Could not initialize the APB-AP");
1830 return retval;
1831 }
1832
1833 armv8->debug_ap->memaccess_tck = 80;
1834
1835 if (!target->dbgbase_set) {
1836 uint32_t dbgbase;
1837 /* Get ROM Table base */
1838 uint32_t apid;
1839 int32_t coreidx = target->coreid;
1840 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
1841 if (retval != ERROR_OK)
1842 return retval;
1843 /* Lookup 0x15 -- Processor DAP */
1844 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
1845 &armv8->debug_base, &coreidx);
1846 if (retval != ERROR_OK)
1847 return retval;
1848 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
1849 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
1850 } else
1851 armv8->debug_base = target->dbgbase;
1852
1853 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1854 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
1855 if (retval != ERROR_OK) {
1856 LOG_DEBUG("LOCK debug access fail");
1857 return retval;
1858 }
1859
1860 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1861 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
1862 if (retval != ERROR_OK) {
1863 LOG_DEBUG("Examine %s failed", "oslock");
1864 return retval;
1865 }
1866
1867 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1868 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
1869 if (retval != ERROR_OK) {
1870 LOG_DEBUG("Examine %s failed", "CPUID");
1871 return retval;
1872 }
1873
1874 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1875 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
1876 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1877 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
1878 if (retval != ERROR_OK) {
1879 LOG_DEBUG("Examine %s failed", "Memory Model Type");
1880 return retval;
1881 }
1882 ttypr |= tmp1;
1883 ttypr = (ttypr << 32) | tmp0;
1884
1885 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1886 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
1887 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1888 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
1889 if (retval != ERROR_OK) {
1890 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
1891 return retval;
1892 }
1893 debug |= tmp1;
1894 debug = (debug << 32) | tmp0;
1895
1896 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1897 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
1898 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
1899
1900 if (target->ctibase == 0) {
1901 /* assume a v8 rom table layout */
1902 armv8->cti_base = target->ctibase = armv8->debug_base + 0x10000;
1903 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, target->ctibase);
1904 } else
1905 armv8->cti_base = target->ctibase;
1906
1907 armv8->arm.core_type = ARM_MODE_MON;
1908 retval = aarch64_dpm_setup(aarch64, debug);
1909 if (retval != ERROR_OK)
1910 return retval;
1911
1912 /* Setup Breakpoint Register Pairs */
1913 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
1914 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
1915 aarch64->brp_num_available = aarch64->brp_num;
1916 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
1917 for (i = 0; i < aarch64->brp_num; i++) {
1918 aarch64->brp_list[i].used = 0;
1919 if (i < (aarch64->brp_num-aarch64->brp_num_context))
1920 aarch64->brp_list[i].type = BRP_NORMAL;
1921 else
1922 aarch64->brp_list[i].type = BRP_CONTEXT;
1923 aarch64->brp_list[i].value = 0;
1924 aarch64->brp_list[i].control = 0;
1925 aarch64->brp_list[i].BRPn = i;
1926 }
1927
1928 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
1929
1930 target_set_examined(target);
1931 return ERROR_OK;
1932 }
1933
1934 static int aarch64_examine(struct target *target)
1935 {
1936 int retval = ERROR_OK;
1937
1938 /* don't re-probe hardware after each reset */
1939 if (!target_was_examined(target))
1940 retval = aarch64_examine_first(target);
1941
1942 /* Configure core debug access */
1943 if (retval == ERROR_OK)
1944 retval = aarch64_init_debug_access(target);
1945
1946 return retval;
1947 }
1948
1949 /*
1950 * Cortex-A8 target creation and initialization
1951 */
1952
/*
 * target_type init hook.  Intentionally a no-op: all real setup (DAP
 * probing, debug access, breakpoint bookkeeping) happens lazily in
 * aarch64_examine_first().
 */
static int aarch64_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
1959
1960 static int aarch64_init_arch_info(struct target *target,
1961 struct aarch64_common *aarch64, struct jtag_tap *tap)
1962 {
1963 struct armv8_common *armv8 = &aarch64->armv8_common;
1964 struct adiv5_dap *dap = armv8->arm.dap;
1965
1966 armv8->arm.dap = dap;
1967
1968 /* Setup struct aarch64_common */
1969 aarch64->common_magic = AARCH64_COMMON_MAGIC;
1970 /* tap has no dap initialized */
1971 if (!tap->dap) {
1972 tap->dap = dap_init();
1973
1974 /* Leave (only) generic DAP stuff for debugport_init() */
1975 tap->dap->tap = tap;
1976 }
1977
1978 armv8->arm.dap = tap->dap;
1979
1980 aarch64->fast_reg_read = 0;
1981
1982 /* register arch-specific functions */
1983 armv8->examine_debug_reason = NULL;
1984
1985 armv8->post_debug_entry = aarch64_post_debug_entry;
1986
1987 armv8->pre_restore_context = NULL;
1988
1989 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
1990
1991 /* REVISIT v7a setup should be in a v7a-specific routine */
1992 armv8_init_arch_info(target, armv8);
1993 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
1994
1995 return ERROR_OK;
1996 }
1997
1998 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
1999 {
2000 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2001
2002 return aarch64_init_arch_info(target, aarch64, target->tap);
2003 }
2004
2005 static int aarch64_mmu(struct target *target, int *enabled)
2006 {
2007 if (target->state != TARGET_HALTED) {
2008 LOG_ERROR("%s: target not halted", __func__);
2009 return ERROR_TARGET_INVALID;
2010 }
2011
2012 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2013 return ERROR_OK;
2014 }
2015
2016 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2017 target_addr_t *phys)
2018 {
2019 return armv8_mmu_translate_va(target, virt, phys);
2020 }
2021
2022 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2023 {
2024 struct target *target = get_current_target(CMD_CTX);
2025 struct armv8_common *armv8 = target_to_armv8(target);
2026
2027 return armv8_handle_cache_info_command(CMD_CTX,
2028 &armv8->armv8_mmu.armv8_cache);
2029 }
2030
2031
2032 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2033 {
2034 struct target *target = get_current_target(CMD_CTX);
2035 if (!target_was_examined(target)) {
2036 LOG_ERROR("target not examined yet");
2037 return ERROR_FAIL;
2038 }
2039
2040 return aarch64_init_debug_access(target);
2041 }
2042 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2043 {
2044 struct target *target = get_current_target(CMD_CTX);
2045 /* check target is an smp target */
2046 struct target_list *head;
2047 struct target *curr;
2048 head = target->head;
2049 target->smp = 0;
2050 if (head != (struct target_list *)NULL) {
2051 while (head != (struct target_list *)NULL) {
2052 curr = head->target;
2053 curr->smp = 0;
2054 head = head->next;
2055 }
2056 /* fixes the target display to the debugger */
2057 target->gdb_service->target = target;
2058 }
2059 return ERROR_OK;
2060 }
2061
2062 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2063 {
2064 struct target *target = get_current_target(CMD_CTX);
2065 struct target_list *head;
2066 struct target *curr;
2067 head = target->head;
2068 if (head != (struct target_list *)NULL) {
2069 target->smp = 1;
2070 while (head != (struct target_list *)NULL) {
2071 curr = head->target;
2072 curr->smp = 1;
2073 head = head->next;
2074 }
2075 }
2076 return ERROR_OK;
2077 }
2078
2079 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2080 {
2081 struct target *target = get_current_target(CMD_CTX);
2082 int retval = ERROR_OK;
2083 struct target_list *head;
2084 head = target->head;
2085 if (head != (struct target_list *)NULL) {
2086 if (CMD_ARGC == 1) {
2087 int coreid = 0;
2088 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2089 if (ERROR_OK != retval)
2090 return retval;
2091 target->gdb_service->core[1] = coreid;
2092
2093 }
2094 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2095 , target->gdb_service->core[1]);
2096 }
2097 return ERROR_OK;
2098 }
2099
2100 static const struct command_registration aarch64_exec_command_handlers[] = {
2101 {
2102 .name = "cache_info",
2103 .handler = aarch64_handle_cache_info_command,
2104 .mode = COMMAND_EXEC,
2105 .help = "display information about target caches",
2106 .usage = "",
2107 },
2108 {
2109 .name = "dbginit",
2110 .handler = aarch64_handle_dbginit_command,
2111 .mode = COMMAND_EXEC,
2112 .help = "Initialize core debug",
2113 .usage = "",
2114 },
2115 { .name = "smp_off",
2116 .handler = aarch64_handle_smp_off_command,
2117 .mode = COMMAND_EXEC,
2118 .help = "Stop smp handling",
2119 .usage = "",
2120 },
2121 {
2122 .name = "smp_on",
2123 .handler = aarch64_handle_smp_on_command,
2124 .mode = COMMAND_EXEC,
2125 .help = "Restart smp handling",
2126 .usage = "",
2127 },
2128 {
2129 .name = "smp_gdb",
2130 .handler = aarch64_handle_smp_gdb_command,
2131 .mode = COMMAND_EXEC,
2132 .help = "display/fix current core played to gdb",
2133 .usage = "",
2134 },
2135
2136
2137 COMMAND_REGISTRATION_DONE
2138 };
/* Top-level command table: chains the generic ARM and ARMv8 command
 * groups plus the aarch64-specific commands above. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		/* generic ARM commands (reg, core_state, ...) */
		.chain = arm_command_handlers,
	},
	{
		/* ARMv8-specific commands */
		.chain = armv8_command_handlers,
	},
	{
		/* NOTE(review): the group is named "cortex_a" although this is
		 * the aarch64 target — presumably copy/paste legacy; renaming
		 * would break existing user scripts, so it is left as-is. */
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2155
/* Target vector for the "aarch64" target type: wires the generic
 * target-framework callbacks to the AArch64-specific implementations
 * defined in this file (and the shared armv8/arm helpers). */
struct target_type aarch64_target = {
	.name = "aarch64",

	/* run-state polling and state reporting */
	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	/* run control */
	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	/* virtual-address memory access */
	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	/* generic ARM helpers for checksum/blank-check */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* hardware breakpoints; watchpoints not implemented yet */
	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	/* physical-address access and MMU translation */
	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)