target/cortex_a: fix waiting for target halted after step
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex-R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 * *
39 * Cortex-A8(tm) TRM, ARM DDI 0344H *
40 * Cortex-A9(tm) TRM, ARM DDI 0407F *
41 * Cortex-R4(tm) TRM, ARM DDI 0363E *
42 * Cortex-A15(tm)TRM, ARM DDI 0438C *
43 * *
44 ***************************************************************************/
45
46 #ifdef HAVE_CONFIG_H
47 #include "config.h"
48 #endif
49
50 #include "breakpoints.h"
51 #include "cortex_a.h"
52 #include "register.h"
53 #include "armv7a_mmu.h"
54 #include "target_request.h"
55 #include "target_type.h"
56 #include "arm_opcodes.h"
57 #include "arm_semihosting.h"
58 #include "transport/transport.h"
59 #include "smp.h"
60 #include <helper/time_support.h>
61
62 static int cortex_a_poll(struct target *target);
63 static int cortex_a_debug_entry(struct target *target);
64 static int cortex_a_restore_context(struct target *target, bool bpwp);
65 static int cortex_a_set_breakpoint(struct target *target,
66 struct breakpoint *breakpoint, uint8_t matchmode);
67 static int cortex_a_set_context_breakpoint(struct target *target,
68 struct breakpoint *breakpoint, uint8_t matchmode);
69 static int cortex_a_set_hybrid_breakpoint(struct target *target,
70 struct breakpoint *breakpoint);
71 static int cortex_a_unset_breakpoint(struct target *target,
72 struct breakpoint *breakpoint);
73 static int cortex_a_mmu(struct target *target, int *enabled);
74 static int cortex_a_mmu_modify(struct target *target, int enable);
75 static int cortex_a_virt2phys(struct target *target,
76 target_addr_t virt, target_addr_t *phys);
77 static int cortex_a_read_cpu_memory(struct target *target,
78 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
79
80
81 /* restore cp15_control_reg at resume */
82 static int cortex_a_restore_cp15_control_reg(struct target *target)
83 {
84 int retval = ERROR_OK;
85 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
86 struct armv7a_common *armv7a = target_to_armv7a(target);
87
88 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
89 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
90 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
91 retval = armv7a->arm.mcr(target, 15,
92 0, 0, /* op1, op2 */
93 1, 0, /* CRn, CRm */
94 cortex_a->cp15_control_reg);
95 }
96 return retval;
97 }
98
99 /*
100 * Set up ARM core for memory access.
101 * If !phys_access, switch to SVC mode and make sure MMU is on
102 * If phys_access, switch off mmu
103 */
104 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
105 {
106 struct armv7a_common *armv7a = target_to_armv7a(target);
107 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
108 int mmu_enabled = 0;
109
110 if (phys_access == 0) {
111 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
112 cortex_a_mmu(target, &mmu_enabled);
113 if (mmu_enabled)
114 cortex_a_mmu_modify(target, 1);
115 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
116 /* overwrite DACR to all-manager */
117 armv7a->arm.mcr(target, 15,
118 0, 0, 3, 0,
119 0xFFFFFFFF);
120 }
121 } else {
122 cortex_a_mmu(target, &mmu_enabled);
123 if (mmu_enabled)
124 cortex_a_mmu_modify(target, 0);
125 }
126 return ERROR_OK;
127 }
128
129 /*
130 * Restore ARM core after memory access.
131 * If !phys_access, switch to previous mode
132 * If phys_access, restore MMU setting
133 */
134 static int cortex_a_post_memaccess(struct target *target, int phys_access)
135 {
136 struct armv7a_common *armv7a = target_to_armv7a(target);
137 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
138
139 if (phys_access == 0) {
140 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
141 /* restore */
142 armv7a->arm.mcr(target, 15,
143 0, 0, 3, 0,
144 cortex_a->cp15_dacr_reg);
145 }
146 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
147 } else {
148 int mmu_enabled = 0;
149 cortex_a_mmu(target, &mmu_enabled);
150 if (mmu_enabled)
151 cortex_a_mmu_modify(target, 1);
152 }
153 return ERROR_OK;
154 }
155
156
157 /* modify cp15_control_reg in order to enable or disable mmu for :
158 * - virt2phys address conversion
159 * - read or write memory in phys or virt address */
160 static int cortex_a_mmu_modify(struct target *target, int enable)
161 {
162 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
163 struct armv7a_common *armv7a = target_to_armv7a(target);
164 int retval = ERROR_OK;
165 int need_write = 0;
166
167 if (enable) {
168 /* if mmu enabled at target stop and mmu not enable */
169 if (!(cortex_a->cp15_control_reg & 0x1U)) {
170 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
171 return ERROR_FAIL;
172 }
173 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
174 cortex_a->cp15_control_reg_curr |= 0x1U;
175 need_write = 1;
176 }
177 } else {
178 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
179 cortex_a->cp15_control_reg_curr &= ~0x1U;
180 need_write = 1;
181 }
182 }
183
184 if (need_write) {
185 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
186 enable ? "enable mmu" : "disable mmu",
187 cortex_a->cp15_control_reg_curr);
188
189 retval = armv7a->arm.mcr(target, 15,
190 0, 0, /* op1, op2 */
191 1, 0, /* CRn, CRm */
192 cortex_a->cp15_control_reg_curr);
193 }
194 return retval;
195 }
196
197 /*
198 * Cortex-A Basic debug access, very low level assumes state is saved
199 */
200 static int cortex_a_init_debug_access(struct target *target)
201 {
202 struct armv7a_common *armv7a = target_to_armv7a(target);
203 uint32_t dscr;
204 int retval;
205
206 /* lock memory-mapped access to debug registers to prevent
207 * software interference */
208 retval = mem_ap_write_u32(armv7a->debug_ap,
209 armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
210 if (retval != ERROR_OK)
211 return retval;
212
213 /* Disable cacheline fills and force cache write-through in debug state */
214 retval = mem_ap_write_u32(armv7a->debug_ap,
215 armv7a->debug_base + CPUDBG_DSCCR, 0);
216 if (retval != ERROR_OK)
217 return retval;
218
219 /* Disable TLB lookup and refill/eviction in debug state */
220 retval = mem_ap_write_u32(armv7a->debug_ap,
221 armv7a->debug_base + CPUDBG_DSMCR, 0);
222 if (retval != ERROR_OK)
223 return retval;
224
225 retval = dap_run(armv7a->debug_ap->dap);
226 if (retval != ERROR_OK)
227 return retval;
228
229 /* Enabling of instruction execution in debug mode is done in debug_entry code */
230
231 /* Resync breakpoint registers */
232
233 /* Enable halt for breakpoint, watchpoint and vector catch */
234 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
235 armv7a->debug_base + CPUDBG_DSCR, &dscr);
236 if (retval != ERROR_OK)
237 return retval;
238 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
239 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
240 if (retval != ERROR_OK)
241 return retval;
242
243 /* Since this is likely called from init or reset, update target state information*/
244 return cortex_a_poll(target);
245 }
246
247 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
248 {
249 /* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
250 * Writes final value of DSCR into *dscr. Pass force to force always
251 * reading DSCR at least once. */
252 struct armv7a_common *armv7a = target_to_armv7a(target);
253 int64_t then = timeval_ms();
254 while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
255 force = false;
256 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
257 armv7a->debug_base + CPUDBG_DSCR, dscr);
258 if (retval != ERROR_OK) {
259 LOG_ERROR("Could not read DSCR register");
260 return retval;
261 }
262 if (timeval_ms() > then + 1000) {
263 LOG_ERROR("Timeout waiting for InstrCompl=1");
264 return ERROR_FAIL;
265 }
266 }
267 return ERROR_OK;
268 }
269
270 /* To reduce needless round-trips, pass in a pointer to the current
271 * DSCR value. Initialize it to zero if you just need to know the
272 * value on return from this function; or DSCR_INSTR_COMP if you
273 * happen to know that no instruction is pending.
274 */
275 static int cortex_a_exec_opcode(struct target *target,
276 uint32_t opcode, uint32_t *dscr_p)
277 {
278 uint32_t dscr;
279 int retval;
280 struct armv7a_common *armv7a = target_to_armv7a(target);
281
282 dscr = dscr_p ? *dscr_p : 0;
283
284 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
285
286 /* Wait for InstrCompl bit to be set */
287 retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
288 if (retval != ERROR_OK)
289 return retval;
290
291 retval = mem_ap_write_u32(armv7a->debug_ap,
292 armv7a->debug_base + CPUDBG_ITR, opcode);
293 if (retval != ERROR_OK)
294 return retval;
295
296 int64_t then = timeval_ms();
297 do {
298 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
299 armv7a->debug_base + CPUDBG_DSCR, &dscr);
300 if (retval != ERROR_OK) {
301 LOG_ERROR("Could not read DSCR register");
302 return retval;
303 }
304 if (timeval_ms() > then + 1000) {
305 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
306 return ERROR_FAIL;
307 }
308 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
309
310 if (dscr_p)
311 *dscr_p = dscr;
312
313 return retval;
314 }
315
316 /* Write to memory mapped registers directly with no cache or mmu handling */
317 static int cortex_a_dap_write_memap_register_u32(struct target *target,
318 uint32_t address,
319 uint32_t value)
320 {
321 int retval;
322 struct armv7a_common *armv7a = target_to_armv7a(target);
323
324 retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
325
326 return retval;
327 }
328
329 /*
330 * Cortex-A implementation of Debug Programmer's Model
331 *
332 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
333 * so there's no need to poll for it before executing an instruction.
334 *
335 * NOTE that in several of these cases the "stall" mode might be useful.
336 * It'd let us queue a few operations together... prepare/finish might
337 * be the places to enable/disable that mode.
338 */
339
/* Map a generic struct arm_dpm back to its enclosing cortex_a instance. */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
344
/* Push a word into the DCC by writing the DTRRX register (host -> target). */
static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
{
	LOG_DEBUG("write DCC 0x%08" PRIx32, data);
	return mem_ap_write_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
}
351
352 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
353 uint32_t *dscr_p)
354 {
355 uint32_t dscr = DSCR_INSTR_COMP;
356 int retval;
357
358 if (dscr_p)
359 dscr = *dscr_p;
360
361 /* Wait for DTRRXfull */
362 int64_t then = timeval_ms();
363 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
364 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
365 a->armv7a_common.debug_base + CPUDBG_DSCR,
366 &dscr);
367 if (retval != ERROR_OK)
368 return retval;
369 if (timeval_ms() > then + 1000) {
370 LOG_ERROR("Timeout waiting for read dcc");
371 return ERROR_FAIL;
372 }
373 }
374
375 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
376 a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
377 if (retval != ERROR_OK)
378 return retval;
379 /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
380
381 if (dscr_p)
382 *dscr_p = dscr;
383
384 return retval;
385 }
386
387 static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
388 {
389 struct cortex_a_common *a = dpm_to_a(dpm);
390 uint32_t dscr;
391 int retval;
392
393 /* set up invariant: INSTR_COMP is set after ever DPM operation */
394 int64_t then = timeval_ms();
395 for (;; ) {
396 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
397 a->armv7a_common.debug_base + CPUDBG_DSCR,
398 &dscr);
399 if (retval != ERROR_OK)
400 return retval;
401 if ((dscr & DSCR_INSTR_COMP) != 0)
402 break;
403 if (timeval_ms() > then + 1000) {
404 LOG_ERROR("Timeout waiting for dpm prepare");
405 return ERROR_FAIL;
406 }
407 }
408
409 /* this "should never happen" ... */
410 if (dscr & DSCR_DTR_RX_FULL) {
411 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
412 /* Clear DCCRX */
413 retval = cortex_a_exec_opcode(
414 a->armv7a_common.arm.target,
415 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
416 &dscr);
417 if (retval != ERROR_OK)
418 return retval;
419 }
420
421 return retval;
422 }
423
/* DPM finish hook: nothing to tear down for Cortex-A. */
static int cortex_a_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
429
430 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
431 uint32_t opcode, uint32_t data)
432 {
433 struct cortex_a_common *a = dpm_to_a(dpm);
434 int retval;
435 uint32_t dscr = DSCR_INSTR_COMP;
436
437 retval = cortex_a_write_dcc(a, data);
438 if (retval != ERROR_OK)
439 return retval;
440
441 return cortex_a_exec_opcode(
442 a->armv7a_common.arm.target,
443 opcode,
444 &dscr);
445 }
446
447 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
448 uint32_t opcode, uint32_t data)
449 {
450 struct cortex_a_common *a = dpm_to_a(dpm);
451 uint32_t dscr = DSCR_INSTR_COMP;
452 int retval;
453
454 retval = cortex_a_write_dcc(a, data);
455 if (retval != ERROR_OK)
456 return retval;
457
458 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
459 retval = cortex_a_exec_opcode(
460 a->armv7a_common.arm.target,
461 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
462 &dscr);
463 if (retval != ERROR_OK)
464 return retval;
465
466 /* then the opcode, taking data from R0 */
467 retval = cortex_a_exec_opcode(
468 a->armv7a_common.arm.target,
469 opcode,
470 &dscr);
471
472 return retval;
473 }
474
475 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
476 {
477 struct target *target = dpm->arm->target;
478 uint32_t dscr = DSCR_INSTR_COMP;
479
480 /* "Prefetch flush" after modifying execution status in CPSR */
481 return cortex_a_exec_opcode(target,
482 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
483 &dscr);
484 }
485
486 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
487 uint32_t opcode, uint32_t *data)
488 {
489 struct cortex_a_common *a = dpm_to_a(dpm);
490 int retval;
491 uint32_t dscr = DSCR_INSTR_COMP;
492
493 /* the opcode, writing data to DCC */
494 retval = cortex_a_exec_opcode(
495 a->armv7a_common.arm.target,
496 opcode,
497 &dscr);
498 if (retval != ERROR_OK)
499 return retval;
500
501 return cortex_a_read_dcc(a, data, &dscr);
502 }
503
504
505 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
506 uint32_t opcode, uint32_t *data)
507 {
508 struct cortex_a_common *a = dpm_to_a(dpm);
509 uint32_t dscr = DSCR_INSTR_COMP;
510 int retval;
511
512 /* the opcode, writing data to R0 */
513 retval = cortex_a_exec_opcode(
514 a->armv7a_common.arm.target,
515 opcode,
516 &dscr);
517 if (retval != ERROR_OK)
518 return retval;
519
520 /* write R0 to DCC */
521 retval = cortex_a_exec_opcode(
522 a->armv7a_common.arm.target,
523 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
524 &dscr);
525 if (retval != ERROR_OK)
526 return retval;
527
528 return cortex_a_read_dcc(a, data, &dscr);
529 }
530
531 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
532 uint32_t addr, uint32_t control)
533 {
534 struct cortex_a_common *a = dpm_to_a(dpm);
535 uint32_t vr = a->armv7a_common.debug_base;
536 uint32_t cr = a->armv7a_common.debug_base;
537 int retval;
538
539 switch (index_t) {
540 case 0 ... 15: /* breakpoints */
541 vr += CPUDBG_BVR_BASE;
542 cr += CPUDBG_BCR_BASE;
543 break;
544 case 16 ... 31: /* watchpoints */
545 vr += CPUDBG_WVR_BASE;
546 cr += CPUDBG_WCR_BASE;
547 index_t -= 16;
548 break;
549 default:
550 return ERROR_FAIL;
551 }
552 vr += 4 * index_t;
553 cr += 4 * index_t;
554
555 LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
556 (unsigned) vr, (unsigned) cr);
557
558 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
559 vr, addr);
560 if (retval != ERROR_OK)
561 return retval;
562 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
563 cr, control);
564 return retval;
565 }
566
567 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
568 {
569 struct cortex_a_common *a = dpm_to_a(dpm);
570 uint32_t cr;
571
572 switch (index_t) {
573 case 0 ... 15:
574 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
575 break;
576 case 16 ... 31:
577 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
578 index_t -= 16;
579 break;
580 default:
581 return ERROR_FAIL;
582 }
583 cr += 4 * index_t;
584
585 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
586
587 /* clear control register */
588 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
589 }
590
/* Wire up the generic DPM framework with the Cortex-A specific hooks,
 * then run the framework's setup and initialization. */
static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
{
	struct arm_dpm *dpm = &a->armv7a_common.dpm;
	int retval;

	dpm->arm = &a->armv7a_common.arm;
	dpm->didr = didr;

	dpm->prepare = cortex_a_dpm_prepare;
	dpm->finish = cortex_a_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a_bpwp_enable;
	dpm->bpwp_disable = cortex_a_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
618 static struct target *get_cortex_a(struct target *target, int32_t coreid)
619 {
620 struct target_list *head;
621 struct target *curr;
622
623 head = target->head;
624 while (head != (struct target_list *)NULL) {
625 curr = head->target;
626 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
627 return curr;
628 head = head->next;
629 }
630 return target;
631 }
632 static int cortex_a_halt(struct target *target);
633
634 static int cortex_a_halt_smp(struct target *target)
635 {
636 int retval = 0;
637 struct target_list *head;
638 struct target *curr;
639 head = target->head;
640 while (head != (struct target_list *)NULL) {
641 curr = head->target;
642 if ((curr != target) && (curr->state != TARGET_HALTED)
643 && target_was_examined(curr))
644 retval += cortex_a_halt(curr);
645 head = head->next;
646 }
647 return retval;
648 }
649
/* After one SMP core halts: claim the gdb service for this core if it is
 * still unclaimed (halting the rest of the group), then poll every other
 * examined, not-yet-halted sibling so their state catches up. The target
 * currently serving gdb is deliberately polled last, because polling it
 * alerts GDB. */
static int update_halt_gdb(struct target *target)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;
	int retval = 0;

	/* unclaimed gdb service: this core becomes the serving core and
	 * drags the rest of the SMP group into the halted state */
	if (target->gdb_service && target->gdb_service->core[0] == -1) {
		target->gdb_service->target = target;
		target->gdb_service->core[0] = target->coreid;
		retval += cortex_a_halt_smp(target);
	}

	if (target->gdb_service)
		gdb_target = target->gdb_service->target;

	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* Skip gdb_target; it alerts GDB so has to be polled as last one */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in cortex_a_poll() */
		curr->smp = 0;
		cortex_a_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target != NULL && gdb_target != target)
		cortex_a_poll(gdb_target);
	return retval;
}
691
/*
 * Cortex-A Run control
 */

/* Poll the target: read DSCR, detect run/halt transitions, enter debug
 * state on a new halt and notify the upper layers (SMP bookkeeping,
 * semihosting, gdb events). */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR so debug_entry and friends can inspect it */
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;

			retval = cortex_a_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp) {
				retval = update_halt_gdb(target);
				if (retval != ERROR_OK)
					return retval;
			}

			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
			} else {	/* prev_target_state is RUNNING, UNKNOWN or RESET */
				/* if semihosting fully handled the halt, suppress
				 * the HALTED event and return its status */
				if (arm_semihosting(target, &retval) != 0)
					return retval;

				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
752
753 static int cortex_a_halt(struct target *target)
754 {
755 int retval = ERROR_OK;
756 uint32_t dscr;
757 struct armv7a_common *armv7a = target_to_armv7a(target);
758
759 /*
760 * Tell the core to be halted by writing DRCR with 0x1
761 * and then wait for the core to be halted.
762 */
763 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
764 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
765 if (retval != ERROR_OK)
766 return retval;
767
768 int64_t then = timeval_ms();
769 for (;; ) {
770 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
771 armv7a->debug_base + CPUDBG_DSCR, &dscr);
772 if (retval != ERROR_OK)
773 return retval;
774 if ((dscr & DSCR_CORE_HALTED) != 0)
775 break;
776 if (timeval_ms() > then + 1000) {
777 LOG_ERROR("Timeout waiting for halt");
778 return ERROR_FAIL;
779 }
780 }
781
782 target->debug_reason = DBG_REASON_DBGRQ;
783
784 return ERROR_OK;
785 }
786
/* Prepare the core for resuming: compute and write back the resume PC
 * (honoring ARM/Thumb state), restore cp15 control and the full register
 * context, and mark the target running. Does NOT actually restart the
 * core — cortex_a_internal_restart() does that. */
static int cortex_a_internal_restore(struct target *target, int current,
	target_addr_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = true;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = true;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = true;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = true;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM state: PC must be word-aligned */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
		case ARM_STATE_AARCH64:
			LOG_ERROR("Shoudn't be in AARCH64 state");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = true;
	arm->pc->valid = true;

	/* restore dpm_mode at system halt */
	arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
884
885 static int cortex_a_internal_restart(struct target *target)
886 {
887 struct armv7a_common *armv7a = target_to_armv7a(target);
888 struct arm *arm = &armv7a->arm;
889 int retval;
890 uint32_t dscr;
891 /*
892 * * Restart core and wait for it to be started. Clear ITRen and sticky
893 * * exception flags: see ARMv7 ARM, C5.9.
894 *
895 * REVISIT: for single stepping, we probably want to
896 * disable IRQs by default, with optional override...
897 */
898
899 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
900 armv7a->debug_base + CPUDBG_DSCR, &dscr);
901 if (retval != ERROR_OK)
902 return retval;
903
904 if ((dscr & DSCR_INSTR_COMP) == 0)
905 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
906
907 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
908 armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
909 if (retval != ERROR_OK)
910 return retval;
911
912 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
913 armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
914 DRCR_CLEAR_EXCEPTIONS);
915 if (retval != ERROR_OK)
916 return retval;
917
918 int64_t then = timeval_ms();
919 for (;; ) {
920 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
921 armv7a->debug_base + CPUDBG_DSCR, &dscr);
922 if (retval != ERROR_OK)
923 return retval;
924 if ((dscr & DSCR_CORE_RESTARTED) != 0)
925 break;
926 if (timeval_ms() > then + 1000) {
927 LOG_ERROR("Timeout waiting for resume");
928 return ERROR_FAIL;
929 }
930 }
931
932 target->debug_reason = DBG_REASON_NOTHALTED;
933 target->state = TARGET_RUNNING;
934
935 /* registers are now invalid */
936 register_cache_invalidate(arm->core_cache);
937
938 return ERROR_OK;
939 }
940
941 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
942 {
943 int retval = 0;
944 struct target_list *head;
945 struct target *curr;
946 target_addr_t address;
947 head = target->head;
948 while (head != (struct target_list *)NULL) {
949 curr = head->target;
950 if ((curr != target) && (curr->state != TARGET_RUNNING)
951 && target_was_examined(curr)) {
952 /* resume current address , not in step mode */
953 retval += cortex_a_internal_restore(curr, 1, &address,
954 handle_breakpoints, 0);
955 retval += cortex_a_internal_restart(curr);
956 }
957 head = head->next;
958
959 }
960 return retval;
961 }
962
/* Resume execution. current=1 resumes at the current PC, otherwise at
 * <address>. In SMP mode all siblings are restored and restarted as well,
 * unless a gdb core-toggle is pending, in which case only a fake
 * resume/halt cycle is simulated for gdb. */
static int cortex_a_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	/* dummy resume for smp toggle in order to reduce gdb impact */
	if ((target->smp) && (target->gdb_service->core[1] != -1)) {
		/* simulate a start and halt of target */
		target->gdb_service->target = NULL;
		target->gdb_service->core[0] = target->gdb_service->core[1];
		/* fake resume at next poll we play the target core[1], see poll*/
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		return 0;
	}
	cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
	if (target->smp) {
		/* release the gdb serving core so the next halt can claim it */
		target->gdb_service->core[0] = -1;
		retval = cortex_a_restore_smp(target, handle_breakpoints);
		if (retval != ERROR_OK)
			return retval;
	}
	cortex_a_internal_restart(target);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
	}

	return ERROR_OK;
}
997
/* Enter debug state after a halt: enable ITR execution, decode the debug
 * reason from DSCR, record the watchpoint fault address if applicable,
 * read the core registers through the DPM and run the architecture's
 * post-debug-entry hook. */
static int cortex_a_debug_entry(struct target *target)
{
	uint32_t dscr;
	int retval = ERROR_OK;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* First load register accessible through core debug port */
	retval = arm_dpm_read_current_registers(&armv7a->dpm);
	if (retval != ERROR_OK)
		return retval;

	if (arm->spsr) {
		/* read SPSR */
		retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
		if (retval != ERROR_OK)
			return retval;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1076
1077 static int cortex_a_post_debug_entry(struct target *target)
1078 {
1079 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1080 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1081 int retval;
1082
1083 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1084 retval = armv7a->arm.mrc(target, 15,
1085 0, 0, /* op1, op2 */
1086 1, 0, /* CRn, CRm */
1087 &cortex_a->cp15_control_reg);
1088 if (retval != ERROR_OK)
1089 return retval;
1090 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1091 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1092
1093 if (!armv7a->is_armv7r)
1094 armv7a_read_ttbcr(target);
1095
1096 if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1097 armv7a_identify_cache(target);
1098
1099 if (armv7a->is_armv7r) {
1100 armv7a->armv7a_mmu.mmu_enabled = 0;
1101 } else {
1102 armv7a->armv7a_mmu.mmu_enabled =
1103 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1104 }
1105 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1106 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1107 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1108 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1109 cortex_a->curr_mode = armv7a->arm.core_mode;
1110
1111 /* switch to SVC mode to read DACR */
1112 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1113 armv7a->arm.mrc(target, 15,
1114 0, 0, 3, 0,
1115 &cortex_a->cp15_dacr_reg);
1116
1117 LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1118 cortex_a->cp15_dacr_reg);
1119
1120 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1121 return ERROR_OK;
1122 }
1123
1124 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1125 {
1126 struct armv7a_common *armv7a = target_to_armv7a(target);
1127 uint32_t dscr;
1128
1129 /* Read DSCR */
1130 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1131 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1132 if (ERROR_OK != retval)
1133 return retval;
1134
1135 /* clear bitfield */
1136 dscr &= ~bit_mask;
1137 /* put new value */
1138 dscr |= value & bit_mask;
1139
1140 /* write new DSCR */
1141 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1142 armv7a->debug_base + CPUDBG_DSCR, dscr);
1143 return retval;
1144 }
1145
1146 static int cortex_a_step(struct target *target, int current, target_addr_t address,
1147 int handle_breakpoints)
1148 {
1149 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1150 struct armv7a_common *armv7a = target_to_armv7a(target);
1151 struct arm *arm = &armv7a->arm;
1152 struct breakpoint *breakpoint = NULL;
1153 struct breakpoint stepbreakpoint;
1154 struct reg *r;
1155 int retval;
1156
1157 if (target->state != TARGET_HALTED) {
1158 LOG_WARNING("target not halted");
1159 return ERROR_TARGET_NOT_HALTED;
1160 }
1161
1162 /* current = 1: continue on current pc, otherwise continue at <address> */
1163 r = arm->pc;
1164 if (!current)
1165 buf_set_u32(r->value, 0, 32, address);
1166 else
1167 address = buf_get_u32(r->value, 0, 32);
1168
1169 /* The front-end may request us not to handle breakpoints.
1170 * But since Cortex-A uses breakpoint for single step,
1171 * we MUST handle breakpoints.
1172 */
1173 handle_breakpoints = 1;
1174 if (handle_breakpoints) {
1175 breakpoint = breakpoint_find(target, address);
1176 if (breakpoint)
1177 cortex_a_unset_breakpoint(target, breakpoint);
1178 }
1179
1180 /* Setup single step breakpoint */
1181 stepbreakpoint.address = address;
1182 stepbreakpoint.asid = 0;
1183 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1184 ? 2 : 4;
1185 stepbreakpoint.type = BKPT_HARD;
1186 stepbreakpoint.set = 0;
1187
1188 /* Disable interrupts during single step if requested */
1189 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1190 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1191 if (ERROR_OK != retval)
1192 return retval;
1193 }
1194
1195 /* Break on IVA mismatch */
1196 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1197
1198 target->debug_reason = DBG_REASON_SINGLESTEP;
1199
1200 retval = cortex_a_resume(target, 1, address, 0, 0);
1201 if (retval != ERROR_OK)
1202 return retval;
1203
1204 int64_t then = timeval_ms();
1205 while (target->state != TARGET_HALTED) {
1206 retval = cortex_a_poll(target);
1207 if (retval != ERROR_OK)
1208 return retval;
1209 if (target->state == TARGET_HALTED)
1210 break;
1211 if (timeval_ms() > then + 1000) {
1212 LOG_ERROR("timeout waiting for target halt");
1213 return ERROR_FAIL;
1214 }
1215 }
1216
1217 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1218
1219 /* Re-enable interrupts if they were disabled */
1220 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1221 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1222 if (ERROR_OK != retval)
1223 return retval;
1224 }
1225
1226
1227 target->debug_reason = DBG_REASON_BREAKPOINT;
1228
1229 if (breakpoint)
1230 cortex_a_set_breakpoint(target, breakpoint, 0);
1231
1232 if (target->state != TARGET_HALTED)
1233 LOG_DEBUG("target stepped");
1234
1235 return ERROR_OK;
1236 }
1237
1238 static int cortex_a_restore_context(struct target *target, bool bpwp)
1239 {
1240 struct armv7a_common *armv7a = target_to_armv7a(target);
1241
1242 LOG_DEBUG(" ");
1243
1244 if (armv7a->pre_restore_context)
1245 armv7a->pre_restore_context(target);
1246
1247 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1248 }
1249
1250 /*
1251 * Cortex-A Breakpoint and watchpoint functions
1252 */
1253
1254 /* Setup hardware Breakpoint Register Pair */
1255 static int cortex_a_set_breakpoint(struct target *target,
1256 struct breakpoint *breakpoint, uint8_t matchmode)
1257 {
1258 int retval;
1259 int brp_i = 0;
1260 uint32_t control;
1261 uint8_t byte_addr_select = 0x0F;
1262 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1263 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1264 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1265
1266 if (breakpoint->set) {
1267 LOG_WARNING("breakpoint already set");
1268 return ERROR_OK;
1269 }
1270
1271 if (breakpoint->type == BKPT_HARD) {
1272 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1273 brp_i++;
1274 if (brp_i >= cortex_a->brp_num) {
1275 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1276 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1277 }
1278 breakpoint->set = brp_i + 1;
1279 if (breakpoint->length == 2)
1280 byte_addr_select = (3 << (breakpoint->address & 0x02));
1281 control = ((matchmode & 0x7) << 20)
1282 | (byte_addr_select << 5)
1283 | (3 << 1) | 1;
1284 brp_list[brp_i].used = 1;
1285 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1286 brp_list[brp_i].control = control;
1287 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1288 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1289 brp_list[brp_i].value);
1290 if (retval != ERROR_OK)
1291 return retval;
1292 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1293 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1294 brp_list[brp_i].control);
1295 if (retval != ERROR_OK)
1296 return retval;
1297 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1298 brp_list[brp_i].control,
1299 brp_list[brp_i].value);
1300 } else if (breakpoint->type == BKPT_SOFT) {
1301 uint8_t code[4];
1302 /* length == 2: Thumb breakpoint */
1303 if (breakpoint->length == 2)
1304 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1305 else
1306 /* length == 3: Thumb-2 breakpoint, actual encoding is
1307 * a regular Thumb BKPT instruction but we replace a
1308 * 32bit Thumb-2 instruction, so fix-up the breakpoint
1309 * length
1310 */
1311 if (breakpoint->length == 3) {
1312 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1313 breakpoint->length = 4;
1314 } else
1315 /* length == 4, normal ARM breakpoint */
1316 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1317
1318 retval = target_read_memory(target,
1319 breakpoint->address & 0xFFFFFFFE,
1320 breakpoint->length, 1,
1321 breakpoint->orig_instr);
1322 if (retval != ERROR_OK)
1323 return retval;
1324
1325 /* make sure data cache is cleaned & invalidated down to PoC */
1326 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1327 armv7a_cache_flush_virt(target, breakpoint->address,
1328 breakpoint->length);
1329 }
1330
1331 retval = target_write_memory(target,
1332 breakpoint->address & 0xFFFFFFFE,
1333 breakpoint->length, 1, code);
1334 if (retval != ERROR_OK)
1335 return retval;
1336
1337 /* update i-cache at breakpoint location */
1338 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1339 breakpoint->length);
1340 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1341 breakpoint->length);
1342
1343 breakpoint->set = 0x11; /* Any nice value but 0 */
1344 }
1345
1346 return ERROR_OK;
1347 }
1348
1349 static int cortex_a_set_context_breakpoint(struct target *target,
1350 struct breakpoint *breakpoint, uint8_t matchmode)
1351 {
1352 int retval = ERROR_FAIL;
1353 int brp_i = 0;
1354 uint32_t control;
1355 uint8_t byte_addr_select = 0x0F;
1356 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1357 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1358 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1359
1360 if (breakpoint->set) {
1361 LOG_WARNING("breakpoint already set");
1362 return retval;
1363 }
1364 /*check available context BRPs*/
1365 while ((brp_list[brp_i].used ||
1366 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1367 brp_i++;
1368
1369 if (brp_i >= cortex_a->brp_num) {
1370 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1371 return ERROR_FAIL;
1372 }
1373
1374 breakpoint->set = brp_i + 1;
1375 control = ((matchmode & 0x7) << 20)
1376 | (byte_addr_select << 5)
1377 | (3 << 1) | 1;
1378 brp_list[brp_i].used = 1;
1379 brp_list[brp_i].value = (breakpoint->asid);
1380 brp_list[brp_i].control = control;
1381 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1382 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1383 brp_list[brp_i].value);
1384 if (retval != ERROR_OK)
1385 return retval;
1386 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1387 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1388 brp_list[brp_i].control);
1389 if (retval != ERROR_OK)
1390 return retval;
1391 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1392 brp_list[brp_i].control,
1393 brp_list[brp_i].value);
1394 return ERROR_OK;
1395
1396 }
1397
1398 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1399 {
1400 int retval = ERROR_FAIL;
1401 int brp_1 = 0; /* holds the contextID pair */
1402 int brp_2 = 0; /* holds the IVA pair */
1403 uint32_t control_CTX, control_IVA;
1404 uint8_t CTX_byte_addr_select = 0x0F;
1405 uint8_t IVA_byte_addr_select = 0x0F;
1406 uint8_t CTX_machmode = 0x03;
1407 uint8_t IVA_machmode = 0x01;
1408 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1409 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1410 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1411
1412 if (breakpoint->set) {
1413 LOG_WARNING("breakpoint already set");
1414 return retval;
1415 }
1416 /*check available context BRPs*/
1417 while ((brp_list[brp_1].used ||
1418 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1419 brp_1++;
1420
1421 printf("brp(CTX) found num: %d\n", brp_1);
1422 if (brp_1 >= cortex_a->brp_num) {
1423 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1424 return ERROR_FAIL;
1425 }
1426
1427 while ((brp_list[brp_2].used ||
1428 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1429 brp_2++;
1430
1431 printf("brp(IVA) found num: %d\n", brp_2);
1432 if (brp_2 >= cortex_a->brp_num) {
1433 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1434 return ERROR_FAIL;
1435 }
1436
1437 breakpoint->set = brp_1 + 1;
1438 breakpoint->linked_BRP = brp_2;
1439 control_CTX = ((CTX_machmode & 0x7) << 20)
1440 | (brp_2 << 16)
1441 | (0 << 14)
1442 | (CTX_byte_addr_select << 5)
1443 | (3 << 1) | 1;
1444 brp_list[brp_1].used = 1;
1445 brp_list[brp_1].value = (breakpoint->asid);
1446 brp_list[brp_1].control = control_CTX;
1447 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1448 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1449 brp_list[brp_1].value);
1450 if (retval != ERROR_OK)
1451 return retval;
1452 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1453 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1454 brp_list[brp_1].control);
1455 if (retval != ERROR_OK)
1456 return retval;
1457
1458 control_IVA = ((IVA_machmode & 0x7) << 20)
1459 | (brp_1 << 16)
1460 | (IVA_byte_addr_select << 5)
1461 | (3 << 1) | 1;
1462 brp_list[brp_2].used = 1;
1463 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1464 brp_list[brp_2].control = control_IVA;
1465 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1466 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1467 brp_list[brp_2].value);
1468 if (retval != ERROR_OK)
1469 return retval;
1470 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1471 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1472 brp_list[brp_2].control);
1473 if (retval != ERROR_OK)
1474 return retval;
1475
1476 return ERROR_OK;
1477 }
1478
/* Remove a breakpoint installed by one of the cortex_a_set_*_breakpoint()
 * helpers: clear and release the BVR/BCR pair(s) for hardware breakpoints
 * (both pairs for hybrid ones), or restore the saved original instruction
 * for software breakpoints, keeping caches coherent.
 *
 * Returns ERROR_OK (also for "not set" and invalid-BRP cases, which are
 * only logged) or a propagated memory/debug-port access error. */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* Hybrid breakpoints carry both an address and an ASID and
		 * occupy two linked BRPs; release both of them. */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* Disable first (BCR), then clear the value (BVR). */
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* Plain hardware breakpoint: single BRP to release. */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
						breakpoint->length);
		}

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
						breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
						breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1594
1595 static int cortex_a_add_breakpoint(struct target *target,
1596 struct breakpoint *breakpoint)
1597 {
1598 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1599
1600 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1601 LOG_INFO("no hardware breakpoint available");
1602 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1603 }
1604
1605 if (breakpoint->type == BKPT_HARD)
1606 cortex_a->brp_num_available--;
1607
1608 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1609 }
1610
1611 static int cortex_a_add_context_breakpoint(struct target *target,
1612 struct breakpoint *breakpoint)
1613 {
1614 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1615
1616 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1617 LOG_INFO("no hardware breakpoint available");
1618 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1619 }
1620
1621 if (breakpoint->type == BKPT_HARD)
1622 cortex_a->brp_num_available--;
1623
1624 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1625 }
1626
1627 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1628 struct breakpoint *breakpoint)
1629 {
1630 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1631
1632 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1633 LOG_INFO("no hardware breakpoint available");
1634 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1635 }
1636
1637 if (breakpoint->type == BKPT_HARD)
1638 cortex_a->brp_num_available--;
1639
1640 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1641 }
1642
1643
1644 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1645 {
1646 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1647
1648 #if 0
1649 /* It is perfectly possible to remove breakpoints while the target is running */
1650 if (target->state != TARGET_HALTED) {
1651 LOG_WARNING("target not halted");
1652 return ERROR_TARGET_NOT_HALTED;
1653 }
1654 #endif
1655
1656 if (breakpoint->set) {
1657 cortex_a_unset_breakpoint(target, breakpoint);
1658 if (breakpoint->type == BKPT_HARD)
1659 cortex_a->brp_num_available++;
1660 }
1661
1662
1663 return ERROR_OK;
1664 }
1665
1666 /*
1667 * Cortex-A Reset functions
1668 */
1669
/* target_type hook: assert reset. Prefers a user TARGET_EVENT_RESET_ASSERT
 * handler; otherwise pulses SRST when the adapter supports it. May be
 * called before the target is examined. */
static int cortex_a_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* This function can be called in "target not examined" state */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */

		/*
		 * FIXME: fix reset when transport is SWD. This is a temporary
		 * work-around for release v0.10 that is not intended to stay!
		 */
		if (transport_is_swd() ||
				(target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
			jtag_add_reset(0, 1);

	} else {
		/* No event handler and no SRST line: nothing we can do. */
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target))
		register_cache_invalidate(armv7a->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1709
/* target_type hook: deassert reset. Releases SRST, polls the core state,
 * and — if reset_halt was requested but the core came up running — halts
 * it after the fact. */
static int cortex_a_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	jtag_add_reset(0, 0);

	if (target_was_examined(target)) {
		retval = cortex_a_poll(target);
		if (retval != ERROR_OK)
			return retval;
	}

	if (target->reset_halt) {
		if (target->state != TARGET_HALTED) {
			/* Late halt: there is no reset-catch here, so the
			 * core may execute some instructions before we stop it. */
			LOG_WARNING("%s: ran after reset and before halt ...",
				target_name(target));
			if (target_was_examined(target)) {
				retval = target_halt(target);
				if (retval != ERROR_OK)
					return retval;
			} else
				target->state = TARGET_UNKNOWN;
		}
	}

	return ERROR_OK;
}
1740
1741 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1742 {
1743 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1744 * New desired mode must be in mode. Current value of DSCR must be in
1745 * *dscr, which is updated with new value.
1746 *
1747 * This function elides actually sending the mode-change over the debug
1748 * interface if the mode is already set as desired.
1749 */
1750 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1751 if (new_dscr != *dscr) {
1752 struct armv7a_common *armv7a = target_to_armv7a(target);
1753 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1754 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1755 if (retval == ERROR_OK)
1756 *dscr = new_dscr;
1757 return retval;
1758 } else {
1759 return ERROR_OK;
1760 }
1761 }
1762
1763 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1764 uint32_t value, uint32_t *dscr)
1765 {
1766 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1767 struct armv7a_common *armv7a = target_to_armv7a(target);
1768 int64_t then = timeval_ms();
1769 int retval;
1770
1771 while ((*dscr & mask) != value) {
1772 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1773 armv7a->debug_base + CPUDBG_DSCR, dscr);
1774 if (retval != ERROR_OK)
1775 return retval;
1776 if (timeval_ms() > then + 1000) {
1777 LOG_ERROR("timeout waiting for DSCR bit change");
1778 return ERROR_FAIL;
1779 }
1780 }
1781 return ERROR_OK;
1782 }
1783
/* Read a coprocessor register: execute 'opcode' (which must leave the
 * value in R0), push R0 through DTRTX, and return the value in *data.
 * Requires and updates the cached DSCR in *dscr. Clobbers R0. */
static int cortex_a_read_copro(struct target *target, uint32_t opcode,
		uint32_t *data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Move from coprocessor to R0. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
	 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
	 * must also check TXfull_l). Most of the time this will be free
	 * because TXfull_l will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
1817
1818 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
1819 uint32_t *dfsr, uint32_t *dscr)
1820 {
1821 int retval;
1822
1823 if (dfar) {
1824 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
1825 if (retval != ERROR_OK)
1826 return retval;
1827 }
1828
1829 if (dfsr) {
1830 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
1831 if (retval != ERROR_OK)
1832 return retval;
1833 }
1834
1835 return ERROR_OK;
1836 }
1837
/* Write a coprocessor register: push 'data' through DTRRX into R0, then
 * execute 'opcode' (which must move R0 into the coprocessor). Requires
 * and updates the cached DSCR in *dscr. Clobbers R0. */
static int cortex_a_write_copro(struct target *target, uint32_t opcode,
		uint32_t data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Write the value into DTRRX. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, data);
	if (retval != ERROR_OK)
		return retval;

	/* Move from DTRRX to R0. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to coprocessor. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
1870
1871 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
1872 uint32_t dfsr, uint32_t *dscr)
1873 {
1874 int retval;
1875
1876 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
1877 if (retval != ERROR_OK)
1878 return retval;
1879
1880 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
1881 if (retval != ERROR_OK)
1882 return retval;
1883
1884 return ERROR_OK;
1885 }
1886
1887 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
1888 {
1889 uint32_t status, upper4;
1890
1891 if (dfsr & (1 << 9)) {
1892 /* LPAE format. */
1893 status = dfsr & 0x3f;
1894 upper4 = status >> 2;
1895 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
1896 return ERROR_TARGET_TRANSLATION_FAULT;
1897 else if (status == 33)
1898 return ERROR_TARGET_UNALIGNED_ACCESS;
1899 else
1900 return ERROR_TARGET_DATA_ABORT;
1901 } else {
1902 /* Normal format. */
1903 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
1904 if (status == 1)
1905 return ERROR_TARGET_UNALIGNED_ACCESS;
1906 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
1907 status == 9 || status == 11 || status == 13 || status == 15)
1908 return ERROR_TARGET_TRANSLATION_FAULT;
1909 else
1910 return ERROR_TARGET_DATA_ABORT;
1911 }
1912 }
1913
static int cortex_a_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size size from *buffer. Old value of DSCR must
	 * be in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_write_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Write the value to store into DTRRX. */
		/* Bytes/halfwords honour the target's endianness; only the
		 * low 'size' bytes of 'data' are meaningful. */
		uint32_t data, opcode;
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* Transfer the value from DTRRX to R1. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Write the value transferred to R1 into memory. */
		/* STR{B,H,W} R1, [R0], #size — post-increment keeps the
		 * running address in R0 for the next iteration. */
		if (size == 1)
			opcode = ARMV4_5_STRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_STRH_IP(1, 0);
		else
			opcode = ARMV4_5_STRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check RXfull_l). Most of the time this will be free
		 * because RXfull_l will be cleared immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
1988
static int cortex_a_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size 4 from *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is fast but only works for
	 * word-sized objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	/* Switch to fast mode if not already in that mode. */
	/* In fast mode the latched ITR instruction re-executes on every
	 * DTRRX write, so one opcode drives the whole burst below. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Latch STC instruction. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
	if (retval != ERROR_OK)
		return retval;

	/* Transfer all the data and issue all the instructions. */
	return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
			4, count, armv7a->debug_base + CPUDBG_DTRRX);
}
2017
2018 static int cortex_a_write_cpu_memory(struct target *target,
2019 uint32_t address, uint32_t size,
2020 uint32_t count, const uint8_t *buffer)
2021 {
2022 /* Write memory through the CPU. */
2023 int retval, final_retval;
2024 struct armv7a_common *armv7a = target_to_armv7a(target);
2025 struct arm *arm = &armv7a->arm;
2026 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2027
2028 LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2029 address, size, count);
2030 if (target->state != TARGET_HALTED) {
2031 LOG_WARNING("target not halted");
2032 return ERROR_TARGET_NOT_HALTED;
2033 }
2034
2035 if (!count)
2036 return ERROR_OK;
2037
2038 /* Clear any abort. */
2039 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2040 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2041 if (retval != ERROR_OK)
2042 return retval;
2043
2044 /* Read DSCR. */
2045 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2046 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2047 if (retval != ERROR_OK)
2048 return retval;
2049
2050 /* Switch to non-blocking mode if not already in that mode. */
2051 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2052 if (retval != ERROR_OK)
2053 goto out;
2054
2055 /* Mark R0 as dirty. */
2056 arm_reg_current(arm, 0)->dirty = true;
2057
2058 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2059 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2060 if (retval != ERROR_OK)
2061 goto out;
2062
2063 /* Get the memory address into R0. */
2064 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2065 armv7a->debug_base + CPUDBG_DTRRX, address);
2066 if (retval != ERROR_OK)
2067 goto out;
2068 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2069 if (retval != ERROR_OK)
2070 goto out;
2071
2072 if (size == 4 && (address % 4) == 0) {
2073 /* We are doing a word-aligned transfer, so use fast mode. */
2074 retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2075 } else {
2076 /* Use slow path. */
2077 retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2078 }
2079
2080 out:
2081 final_retval = retval;
2082
2083 /* Switch to non-blocking mode if not already in that mode. */
2084 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2085 if (final_retval == ERROR_OK)
2086 final_retval = retval;
2087
2088 /* Wait for last issued instruction to complete. */
2089 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2090 if (final_retval == ERROR_OK)
2091 final_retval = retval;
2092
2093 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2094 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2095 * check RXfull_l). Most of the time this will be free because RXfull_l
2096 * will be cleared immediately and cached in dscr. However, don't do this
2097 * if there is fault, because then the instruction might not have completed
2098 * successfully. */
2099 if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2100 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2101 if (retval != ERROR_OK)
2102 return retval;
2103 }
2104
2105 /* If there were any sticky abort flags, clear them. */
2106 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2107 fault_dscr = dscr;
2108 mem_ap_write_atomic_u32(armv7a->debug_ap,
2109 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2110 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2111 } else {
2112 fault_dscr = 0;
2113 }
2114
2115 /* Handle synchronous data faults. */
2116 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2117 if (final_retval == ERROR_OK) {
2118 /* Final return value will reflect cause of fault. */
2119 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2120 if (retval == ERROR_OK) {
2121 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2122 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2123 } else
2124 final_retval = retval;
2125 }
2126 /* Fault destroyed DFAR/DFSR; restore them. */
2127 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2128 if (retval != ERROR_OK)
2129 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2130 }
2131
2132 /* Handle asynchronous data faults. */
2133 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2134 if (final_retval == ERROR_OK)
2135 /* No other error has been recorded so far, so keep this one. */
2136 final_retval = ERROR_TARGET_DATA_ABORT;
2137 }
2138
2139 /* If the DCC is nonempty, clear it. */
2140 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2141 uint32_t dummy;
2142 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2143 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2144 if (final_retval == ERROR_OK)
2145 final_retval = retval;
2146 }
2147 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2148 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2149 if (final_retval == ERROR_OK)
2150 final_retval = retval;
2151 }
2152
2153 /* Done. */
2154 return final_retval;
2155 }
2156
2157 static int cortex_a_read_cpu_memory_slow(struct target *target,
2158 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2159 {
2160 /* Reads count objects of size size into *buffer. Old value of DSCR must be
2161 * in *dscr; updated to new value. This is slow because it works for
2162 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
2163 * the address is aligned, cortex_a_read_cpu_memory_fast should be
2164 * preferred.
2165 * Preconditions:
2166 * - Address is in R0.
2167 * - R0 is marked dirty.
2168 */
2169 struct armv7a_common *armv7a = target_to_armv7a(target);
2170 struct arm *arm = &armv7a->arm;
2171 int retval;
2172
2173 /* Mark register R1 as dirty, to use for transferring data. */
2174 arm_reg_current(arm, 1)->dirty = true;
2175
2176 /* Switch to non-blocking mode if not already in that mode. */
2177 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2178 if (retval != ERROR_OK)
2179 return retval;
2180
2181 /* Go through the objects. */
2182 while (count) {
2183 /* Issue a load of the appropriate size to R1. */
2184 uint32_t opcode, data;
2185 if (size == 1)
2186 opcode = ARMV4_5_LDRB_IP(1, 0);
2187 else if (size == 2)
2188 opcode = ARMV4_5_LDRH_IP(1, 0);
2189 else
2190 opcode = ARMV4_5_LDRW_IP(1, 0);
2191 retval = cortex_a_exec_opcode(target, opcode, dscr);
2192 if (retval != ERROR_OK)
2193 return retval;
2194
2195 /* Issue a write of R1 to DTRTX. */
2196 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2197 if (retval != ERROR_OK)
2198 return retval;
2199
2200 /* Check for faults and return early. */
2201 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2202 return ERROR_OK; /* A data fault is not considered a system failure. */
2203
2204 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2205 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2206 * must also check TXfull_l). Most of the time this will be free
2207 * because TXfull_l will be set immediately and cached in dscr. */
2208 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2209 DSCR_DTRTX_FULL_LATCHED, dscr);
2210 if (retval != ERROR_OK)
2211 return retval;
2212
2213 /* Read the value transferred to DTRTX into the buffer. */
2214 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2215 armv7a->debug_base + CPUDBG_DTRTX, &data);
2216 if (retval != ERROR_OK)
2217 return retval;
2218 if (size == 1)
2219 *buffer = (uint8_t) data;
2220 else if (size == 2)
2221 target_buffer_set_u16(target, buffer, (uint16_t) data);
2222 else
2223 target_buffer_set_u32(target, buffer, data);
2224
2225 /* Advance. */
2226 buffer += size;
2227 --count;
2228 }
2229
2230 return ERROR_OK;
2231 }
2232
static int cortex_a_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
	 * *dscr; updated to new value. This is fast but only works for word-sized
	 * objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 * Returns ERROR_OK also when a sticky data abort is seen in DSCR; the
	 * caller is expected to inspect *dscr for the abort flags.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t u32;
	int retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Issue the LDC instruction via a write to ITR. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* The first word is now in flight; only count-1 remain to stream. */
	count--;

	if (count > 0) {
		/* Switch to fast mode if not already in that mode. */
		retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Latch LDC instruction. */
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. Due to fast
		 * mode rules, this blocks until the instruction finishes executing and
		 * then reissues the read instruction to read the next word from
		 * memory. The last read of DTRTX in this call reads the second-to-last
		 * word from memory and issues the read instruction for the last word.
		 */
		retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
				4, count, armv7a->debug_base + CPUDBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;

		/* Advance past the words already stored; the final word is
		 * collected separately below. */
		buffer += count * 4;
	}

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, dscr, false);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Check for faults and return early. */
	if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
		return ERROR_OK; /* A data fault is not considered a system failure. */

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check TXfull_l). Most of the time this will be free because TXfull_l
	 * will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX into the buffer. This is the last
	 * word. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &u32);
	if (retval != ERROR_OK)
		return retval;
	target_buffer_set_u32(target, buffer, u32);

	return ERROR_OK;
}
2319
static int cortex_a_read_cpu_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through the CPU.
	 *
	 * Reads count objects of the given size from address into *buffer via
	 * the DCC, using the fast word path for aligned 4-byte accesses and
	 * the slow path otherwise. On a data fault the fault is decoded into
	 * the return code and the clobbered DFAR/DFSR are restored; the DCC
	 * is always drained before returning. Requires a halted target. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
	}

	/* Cleanup runs unconditionally from here; errors are accumulated in
	 * final_retval so the first failure wins but cleanup still happens. */
out:
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2446
2447
2448 /*
2449 * Cortex-A Memory access
2450 *
2451 * This is same Cortex-M3 but we must also use the correct
2452 * ap number for every access.
2453 */
2454
2455 static int cortex_a_read_phys_memory(struct target *target,
2456 target_addr_t address, uint32_t size,
2457 uint32_t count, uint8_t *buffer)
2458 {
2459 int retval;
2460
2461 if (!count || !buffer)
2462 return ERROR_COMMAND_SYNTAX_ERROR;
2463
2464 LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2465 address, size, count);
2466
2467 /* read memory through the CPU */
2468 cortex_a_prep_memaccess(target, 1);
2469 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2470 cortex_a_post_memaccess(target, 1);
2471
2472 return retval;
2473 }
2474
2475 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2476 uint32_t size, uint32_t count, uint8_t *buffer)
2477 {
2478 int retval;
2479
2480 /* cortex_a handles unaligned memory access */
2481 LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2482 address, size, count);
2483
2484 cortex_a_prep_memaccess(target, 0);
2485 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2486 cortex_a_post_memaccess(target, 0);
2487
2488 return retval;
2489 }
2490
2491 static int cortex_a_write_phys_memory(struct target *target,
2492 target_addr_t address, uint32_t size,
2493 uint32_t count, const uint8_t *buffer)
2494 {
2495 int retval;
2496
2497 if (!count || !buffer)
2498 return ERROR_COMMAND_SYNTAX_ERROR;
2499
2500 LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2501 address, size, count);
2502
2503 /* write memory through the CPU */
2504 cortex_a_prep_memaccess(target, 1);
2505 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2506 cortex_a_post_memaccess(target, 1);
2507
2508 return retval;
2509 }
2510
2511 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2512 uint32_t size, uint32_t count, const uint8_t *buffer)
2513 {
2514 int retval;
2515
2516 /* cortex_a handles unaligned memory access */
2517 LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2518 address, size, count);
2519
2520 /* memory writes bypass the caches, must flush before writing */
2521 armv7a_cache_auto_flush_on_write(target, address, size * count);
2522
2523 cortex_a_prep_memaccess(target, 0);
2524 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2525 cortex_a_post_memaccess(target, 0);
2526 return retval;
2527 }
2528
2529 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2530 uint32_t count, uint8_t *buffer)
2531 {
2532 uint32_t size;
2533
2534 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2535 * will have something to do with the size we leave to it. */
2536 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2537 if (address & size) {
2538 int retval = target_read_memory(target, address, size, 1, buffer);
2539 if (retval != ERROR_OK)
2540 return retval;
2541 address += size;
2542 count -= size;
2543 buffer += size;
2544 }
2545 }
2546
2547 /* Read the data with as large access size as possible. */
2548 for (; size > 0; size /= 2) {
2549 uint32_t aligned = count - count % size;
2550 if (aligned > 0) {
2551 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2552 if (retval != ERROR_OK)
2553 return retval;
2554 address += aligned;
2555 count -= aligned;
2556 buffer += aligned;
2557 }
2558 }
2559
2560 return ERROR_OK;
2561 }
2562
2563 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2564 uint32_t count, const uint8_t *buffer)
2565 {
2566 uint32_t size;
2567
2568 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2569 * will have something to do with the size we leave to it. */
2570 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2571 if (address & size) {
2572 int retval = target_write_memory(target, address, size, 1, buffer);
2573 if (retval != ERROR_OK)
2574 return retval;
2575 address += size;
2576 count -= size;
2577 buffer += size;
2578 }
2579 }
2580
2581 /* Write the data with as large access size as possible. */
2582 for (; size > 0; size /= 2) {
2583 uint32_t aligned = count - count % size;
2584 if (aligned > 0) {
2585 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2586 if (retval != ERROR_OK)
2587 return retval;
2588 address += aligned;
2589 count -= aligned;
2590 buffer += aligned;
2591 }
2592 }
2593
2594 return ERROR_OK;
2595 }
2596
2597 static int cortex_a_handle_target_request(void *priv)
2598 {
2599 struct target *target = priv;
2600 struct armv7a_common *armv7a = target_to_armv7a(target);
2601 int retval;
2602
2603 if (!target_was_examined(target))
2604 return ERROR_OK;
2605 if (!target->dbg_msg_enabled)
2606 return ERROR_OK;
2607
2608 if (target->state == TARGET_RUNNING) {
2609 uint32_t request;
2610 uint32_t dscr;
2611 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2612 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2613
2614 /* check if we have data */
2615 int64_t then = timeval_ms();
2616 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2617 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2618 armv7a->debug_base + CPUDBG_DTRTX, &request);
2619 if (retval == ERROR_OK) {
2620 target_request(target, request);
2621 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2622 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2623 }
2624 if (timeval_ms() > then + 1000) {
2625 LOG_ERROR("Timeout waiting for dtr tx full");
2626 return ERROR_FAIL;
2627 }
2628 }
2629 }
2630
2631 return ERROR_OK;
2632 }
2633
2634 /*
2635 * Cortex-A target information and configuration
2636 */
2637
2638 static int cortex_a_examine_first(struct target *target)
2639 {
2640 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2641 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2642 struct adiv5_dap *swjdp = armv7a->arm.dap;
2643
2644 int i;
2645 int retval = ERROR_OK;
2646 uint32_t didr, cpuid, dbg_osreg;
2647
2648 /* Search for the APB-AP - it is needed for access to debug registers */
2649 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2650 if (retval != ERROR_OK) {
2651 LOG_ERROR("Could not find APB-AP for debug access");
2652 return retval;
2653 }
2654
2655 retval = mem_ap_init(armv7a->debug_ap);
2656 if (retval != ERROR_OK) {
2657 LOG_ERROR("Could not initialize the APB-AP");
2658 return retval;
2659 }
2660
2661 armv7a->debug_ap->memaccess_tck = 80;
2662
2663 if (!target->dbgbase_set) {
2664 uint32_t dbgbase;
2665 /* Get ROM Table base */
2666 uint32_t apid;
2667 int32_t coreidx = target->coreid;
2668 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2669 target->cmd_name);
2670 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
2671 if (retval != ERROR_OK)
2672 return retval;
2673 /* Lookup 0x15 -- Processor DAP */
2674 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
2675 &armv7a->debug_base, &coreidx);
2676 if (retval != ERROR_OK) {
2677 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2678 target->cmd_name);
2679 return retval;
2680 }
2681 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2682 target->coreid, armv7a->debug_base);
2683 } else
2684 armv7a->debug_base = target->dbgbase;
2685
2686 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2687 armv7a->debug_base + CPUDBG_DIDR, &didr);
2688 if (retval != ERROR_OK) {
2689 LOG_DEBUG("Examine %s failed", "DIDR");
2690 return retval;
2691 }
2692
2693 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2694 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2695 if (retval != ERROR_OK) {
2696 LOG_DEBUG("Examine %s failed", "CPUID");
2697 return retval;
2698 }
2699
2700 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2701 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2702
2703 cortex_a->didr = didr;
2704 cortex_a->cpuid = cpuid;
2705
2706 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2707 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2708 if (retval != ERROR_OK)
2709 return retval;
2710 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
2711
2712 if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
2713 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
2714 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2715 return ERROR_TARGET_INIT_FAILED;
2716 }
2717
2718 if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
2719 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
2720
2721 /* Read DBGOSLSR and check if OSLK is implemented */
2722 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2723 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2724 if (retval != ERROR_OK)
2725 return retval;
2726 LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
2727
2728 /* check if OS Lock is implemented */
2729 if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
2730 /* check if OS Lock is set */
2731 if (dbg_osreg & OSLSR_OSLK) {
2732 LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
2733
2734 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2735 armv7a->debug_base + CPUDBG_OSLAR,
2736 0);
2737 if (retval == ERROR_OK)
2738 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2739 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2740
2741 /* if we fail to access the register or cannot reset the OSLK bit, bail out */
2742 if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
2743 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
2744 target->coreid);
2745 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2746 return ERROR_TARGET_INIT_FAILED;
2747 }
2748 }
2749 }
2750
2751 armv7a->arm.core_type = ARM_MODE_MON;
2752
2753 /* Avoid recreating the registers cache */
2754 if (!target_was_examined(target)) {
2755 retval = cortex_a_dpm_setup(cortex_a, didr);
2756 if (retval != ERROR_OK)
2757 return retval;
2758 }
2759
2760 /* Setup Breakpoint Register Pairs */
2761 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
2762 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2763 cortex_a->brp_num_available = cortex_a->brp_num;
2764 free(cortex_a->brp_list);
2765 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
2766 /* cortex_a->brb_enabled = ????; */
2767 for (i = 0; i < cortex_a->brp_num; i++) {
2768 cortex_a->brp_list[i].used = 0;
2769 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
2770 cortex_a->brp_list[i].type = BRP_NORMAL;
2771 else
2772 cortex_a->brp_list[i].type = BRP_CONTEXT;
2773 cortex_a->brp_list[i].value = 0;
2774 cortex_a->brp_list[i].control = 0;
2775 cortex_a->brp_list[i].BRPn = i;
2776 }
2777
2778 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
2779
2780 /* select debug_ap as default */
2781 swjdp->apsel = armv7a->debug_ap->ap_num;
2782
2783 target_set_examined(target);
2784 return ERROR_OK;
2785 }
2786
2787 static int cortex_a_examine(struct target *target)
2788 {
2789 int retval = ERROR_OK;
2790
2791 /* Reestablish communication after target reset */
2792 retval = cortex_a_examine_first(target);
2793
2794 /* Configure core debug access */
2795 if (retval == ERROR_OK)
2796 retval = cortex_a_init_debug_access(target);
2797
2798 return retval;
2799 }
2800
2801 /*
2802 * Cortex-A target creation and initialization
2803 */
2804
2805 static int cortex_a_init_target(struct command_context *cmd_ctx,
2806 struct target *target)
2807 {
2808 /* examine_first() does a bunch of this */
2809 arm_semihosting_init(target);
2810 return ERROR_OK;
2811 }
2812
2813 static int cortex_a_init_arch_info(struct target *target,
2814 struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
2815 {
2816 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2817
2818 /* Setup struct cortex_a_common */
2819 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2820 armv7a->arm.dap = dap;
2821
2822 /* register arch-specific functions */
2823 armv7a->examine_debug_reason = NULL;
2824
2825 armv7a->post_debug_entry = cortex_a_post_debug_entry;
2826
2827 armv7a->pre_restore_context = NULL;
2828
2829 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
2830
2831
2832 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
2833
2834 /* REVISIT v7a setup should be in a v7a-specific routine */
2835 armv7a_init_arch_info(target, armv7a);
2836 target_register_timer_callback(cortex_a_handle_target_request, 1,
2837 TARGET_TIMER_TYPE_PERIODIC, target);
2838
2839 return ERROR_OK;
2840 }
2841
2842 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
2843 {
2844 struct cortex_a_common *cortex_a;
2845 struct adiv5_private_config *pc;
2846
2847 if (target->private_config == NULL)
2848 return ERROR_FAIL;
2849
2850 pc = (struct adiv5_private_config *)target->private_config;
2851
2852 cortex_a = calloc(1, sizeof(struct cortex_a_common));
2853 if (cortex_a == NULL) {
2854 LOG_ERROR("Out of memory");
2855 return ERROR_FAIL;
2856 }
2857 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2858 cortex_a->armv7a_common.is_armv7r = false;
2859 cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
2860
2861 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
2862 }
2863
2864 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
2865 {
2866 struct cortex_a_common *cortex_a;
2867 struct adiv5_private_config *pc;
2868
2869 pc = (struct adiv5_private_config *)target->private_config;
2870 if (adiv5_verify_config(pc) != ERROR_OK)
2871 return ERROR_FAIL;
2872
2873 cortex_a = calloc(1, sizeof(struct cortex_a_common));
2874 if (cortex_a == NULL) {
2875 LOG_ERROR("Out of memory");
2876 return ERROR_FAIL;
2877 }
2878 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2879 cortex_a->armv7a_common.is_armv7r = true;
2880
2881 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
2882 }
2883
2884 static void cortex_a_deinit_target(struct target *target)
2885 {
2886 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2887 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2888 struct arm_dpm *dpm = &armv7a->dpm;
2889 uint32_t dscr;
2890 int retval;
2891
2892 if (target_was_examined(target)) {
2893 /* Disable halt for breakpoint, watchpoint and vector catch */
2894 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2895 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2896 if (retval == ERROR_OK)
2897 mem_ap_write_atomic_u32(armv7a->debug_ap,
2898 armv7a->debug_base + CPUDBG_DSCR,
2899 dscr & ~DSCR_HALT_DBG_MODE);
2900 }
2901
2902 free(cortex_a->brp_list);
2903 free(dpm->dbp);
2904 free(dpm->dwp);
2905 free(target->private_config);
2906 free(cortex_a);
2907 }
2908
2909 static int cortex_a_mmu(struct target *target, int *enabled)
2910 {
2911 struct armv7a_common *armv7a = target_to_armv7a(target);
2912
2913 if (target->state != TARGET_HALTED) {
2914 LOG_ERROR("%s: target not halted", __func__);
2915 return ERROR_TARGET_INVALID;
2916 }
2917
2918 if (armv7a->is_armv7r)
2919 *enabled = 0;
2920 else
2921 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2922
2923 return ERROR_OK;
2924 }
2925
2926 static int cortex_a_virt2phys(struct target *target,
2927 target_addr_t virt, target_addr_t *phys)
2928 {
2929 int retval;
2930 int mmu_enabled = 0;
2931
2932 /*
2933 * If the MMU was not enabled at debug entry, there is no
2934 * way of knowing if there was ever a valid configuration
2935 * for it and thus it's not safe to enable it. In this case,
2936 * just return the virtual address as physical.
2937 */
2938 cortex_a_mmu(target, &mmu_enabled);
2939 if (!mmu_enabled) {
2940 *phys = virt;
2941 return ERROR_OK;
2942 }
2943
2944 /* mmu must be enable in order to get a correct translation */
2945 retval = cortex_a_mmu_modify(target, 1);
2946 if (retval != ERROR_OK)
2947 return retval;
2948 return armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
2949 (uint32_t *)phys, 1);
2950 }
2951
2952 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
2953 {
2954 struct target *target = get_current_target(CMD_CTX);
2955 struct armv7a_common *armv7a = target_to_armv7a(target);
2956
2957 return armv7a_handle_cache_info_command(CMD_CTX,
2958 &armv7a->armv7a_mmu.armv7a_cache);
2959 }
2960
2961
2962 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
2963 {
2964 struct target *target = get_current_target(CMD_CTX);
2965 if (!target_was_examined(target)) {
2966 LOG_ERROR("target not examined yet");
2967 return ERROR_FAIL;
2968 }
2969
2970 return cortex_a_init_debug_access(target);
2971 }
2972
2973 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
2974 {
2975 struct target *target = get_current_target(CMD_CTX);
2976 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2977
2978 static const Jim_Nvp nvp_maskisr_modes[] = {
2979 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
2980 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
2981 { .name = NULL, .value = -1 },
2982 };
2983 const Jim_Nvp *n;
2984
2985 if (CMD_ARGC > 0) {
2986 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2987 if (n->name == NULL) {
2988 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2989 return ERROR_COMMAND_SYNTAX_ERROR;
2990 }
2991
2992 cortex_a->isrmasking_mode = n->value;
2993 }
2994
2995 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
2996 command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
2997
2998 return ERROR_OK;
2999 }
3000
3001 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3002 {
3003 struct target *target = get_current_target(CMD_CTX);
3004 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3005
3006 static const Jim_Nvp nvp_dacrfixup_modes[] = {
3007 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3008 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3009 { .name = NULL, .value = -1 },
3010 };
3011 const Jim_Nvp *n;
3012
3013 if (CMD_ARGC > 0) {
3014 n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3015 if (n->name == NULL)
3016 return ERROR_COMMAND_SYNTAX_ERROR;
3017 cortex_a->dacrfixup_mode = n->value;
3018
3019 }
3020
3021 n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3022 command_print(CMD_CTX, "cortex_a domain access control fixup %s", n->name);
3023
3024 return ERROR_OK;
3025 }
3026
/* Cortex-A specific commands, registered under the "cortex_a" group. */
static const struct command_registration cortex_a_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask cortex_a interrupts",
		.usage = "['on'|'off']",
	},
	{
		.name = "dacrfixup",
		.handler = handle_cortex_a_dacrfixup_command,
		.mode = COMMAND_ANY,
		.help = "set domain access control (DACR) to all-manager "
			"on memory access",
		.usage = "['on'|'off']",
	},
	/* Pull in the shared ARMv7-A MMU and SMP command groups. */
	{
		.chain = armv7a_mmu_command_handlers,
	},
	{
		.chain = smp_command_handlers,
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command tree for cortex_a targets: generic ARM and ARMv7-A
 * commands plus the "cortex_a" group defined above. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3082
/* Target method table for ARMv7-A (Cortex-A) cores. */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* Virtual-address accessors; physical variants are below. */
	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.read_buffer = cortex_a_read_buffer,
	.write_buffer = cortex_a_write_buffer,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* Watchpoint support is not implemented for this target. */
	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,

	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
3131
/* Cortex-R4 specific commands; a subset of the Cortex-A group
 * (no cache_info/dacrfixup, and maskisr is EXEC-mode only). */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_r4 interrupts",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command tree for cortex_r4 targets. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3163
/* Target method table for ARMv7-R (Cortex-R4) cores.  ARMv7-R has no
 * MMU, so plain memory accesses map straight to the physical accessors
 * and there are no .mmu/.virt2phys hooks. */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_phys_memory,
	.write_memory = cortex_a_write_phys_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* Watchpoint support is not implemented for this target. */
	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)