a79b0b90619e42687e5196098dfb1ac503980dbe
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex-R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 * *
39 * Cortex-A8(tm) TRM, ARM DDI 0344H *
40 * Cortex-A9(tm) TRM, ARM DDI 0407F *
 *   Cortex-R4(tm) TRM, ARM DDI 0363E                                      *
42 * Cortex-A15(tm)TRM, ARM DDI 0438C *
43 * *
44 ***************************************************************************/
45
46 #ifdef HAVE_CONFIG_H
47 #include "config.h"
48 #endif
49
50 #include "breakpoints.h"
51 #include "cortex_a.h"
52 #include "register.h"
53 #include "armv7a_mmu.h"
54 #include "target_request.h"
55 #include "target_type.h"
56 #include "arm_opcodes.h"
57 #include "arm_semihosting.h"
58 #include "jtag/interface.h"
59 #include "transport/transport.h"
60 #include "smp.h"
61 #include <helper/time_support.h>
62
63 static int cortex_a_poll(struct target *target);
64 static int cortex_a_debug_entry(struct target *target);
65 static int cortex_a_restore_context(struct target *target, bool bpwp);
66 static int cortex_a_set_breakpoint(struct target *target,
67 struct breakpoint *breakpoint, uint8_t matchmode);
68 static int cortex_a_set_context_breakpoint(struct target *target,
69 struct breakpoint *breakpoint, uint8_t matchmode);
70 static int cortex_a_set_hybrid_breakpoint(struct target *target,
71 struct breakpoint *breakpoint);
72 static int cortex_a_unset_breakpoint(struct target *target,
73 struct breakpoint *breakpoint);
74 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
75 uint32_t value, uint32_t *dscr);
76 static int cortex_a_mmu(struct target *target, int *enabled);
77 static int cortex_a_mmu_modify(struct target *target, int enable);
78 static int cortex_a_virt2phys(struct target *target,
79 target_addr_t virt, target_addr_t *phys);
80 static int cortex_a_read_cpu_memory(struct target *target,
81 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
82
83
84 /* restore cp15_control_reg at resume */
85 static int cortex_a_restore_cp15_control_reg(struct target *target)
86 {
87 int retval = ERROR_OK;
88 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
89 struct armv7a_common *armv7a = target_to_armv7a(target);
90
91 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
92 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
93 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
94 retval = armv7a->arm.mcr(target, 15,
95 0, 0, /* op1, op2 */
96 1, 0, /* CRn, CRm */
97 cortex_a->cp15_control_reg);
98 }
99 return retval;
100 }
101
/*
 * Set up ARM core for memory access.
 * If !phys_access, switch to SVC mode and make sure MMU is on
 * If phys_access, switch off mmu
 */
static int cortex_a_prep_memaccess(struct target *target, int phys_access)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	int mmu_enabled = 0;

	if (phys_access == 0) {
		/* virtual access: run in SVC mode with the MMU on so that
		 * addresses are translated the same way the OS sees them */
		arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 1);
		if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
			/* overwrite DACR (CP15 c3) to all-manager so domain
			 * permission checks cannot fault the access; the saved
			 * value is put back in cortex_a_post_memaccess() */
			armv7a->arm.mcr(target, 15,
					0, 0, 3, 0,
					0xFFFFFFFF);
		}
	} else {
		/* physical access: temporarily disable the MMU if it is on */
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 0);
	}
	return ERROR_OK;
}
131
/*
 * Restore ARM core after memory access.
 * If !phys_access, switch to previous mode
 * If phys_access, restore MMU setting
 */
static int cortex_a_post_memaccess(struct target *target, int phys_access)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	if (phys_access == 0) {
		if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
			/* restore the DACR value saved before the access */
			armv7a->arm.mcr(target, 15,
					0, 0, 3, 0,
					cortex_a->cp15_dacr_reg);
		}
		/* drop the forced SVC mode set up in cortex_a_prep_memaccess() */
		arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	} else {
		/* physical access disabled the MMU; re-enable it if it was on */
		int mmu_enabled = 0;
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 1);
	}
	return ERROR_OK;
}
158
159
/* modify cp15_control_reg in order to enable or disable mmu for :
 * - virt2phys address conversion
 * - read or write memory in phys or virt address
 * Only bit 0 (the MMU enable bit) of the cached copy is touched, and
 * the hardware register is rewritten only when that cached value
 * actually changed. */
static int cortex_a_mmu_modify(struct target *target, int enable)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval = ERROR_OK;
	int need_write = 0;

	if (enable) {
		/* if mmu enabled at target stop and mmu not enable */
		if (!(cortex_a->cp15_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
			return ERROR_FAIL;
		}
		if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
			cortex_a->cp15_control_reg_curr |= 0x1U;
			need_write = 1;
		}
	} else {
		if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
			cortex_a->cp15_control_reg_curr &= ~0x1U;
			need_write = 1;
		}
	}

	if (need_write) {
		LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
			enable ? "enable mmu" : "disable mmu",
			cortex_a->cp15_control_reg_curr);

		/* MCR p15, 0, <Rt>, c1, c0, 0 : write CP15 control register */
		retval = armv7a->arm.mcr(target, 15,
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				cortex_a->cp15_control_reg_curr);
	}
	return retval;
}
199
/*
 * Cortex-A Basic debug access, very low level assumes state is saved
 * Unlocks the memory-mapped debug registers, disables cache and TLB
 * side effects while in debug state, and turns on halting debug mode.
 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t dscr;
	int retval;

	/* lock memory-mapped access to debug registers to prevent
	 * software interference */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable cacheline fills and force cache write-through in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable TLB lookup and refill/eviction in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSMCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* the three non-atomic writes above were only queued on the DAP;
	 * flush them to the target now */
	retval = dap_run(armv7a->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Enable halt for breakpoint, watchpoint and vector catch */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* read-modify-write: set DSCR_HALT_DBG_MODE, preserve other bits */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a_poll(target);
}
249
250 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
251 {
252 /* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
253 * Writes final value of DSCR into *dscr. Pass force to force always
254 * reading DSCR at least once. */
255 struct armv7a_common *armv7a = target_to_armv7a(target);
256 int retval;
257
258 if (force) {
259 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
260 armv7a->debug_base + CPUDBG_DSCR, dscr);
261 if (retval != ERROR_OK) {
262 LOG_ERROR("Could not read DSCR register");
263 return retval;
264 }
265 }
266
267 retval = cortex_a_wait_dscr_bits(target, DSCR_INSTR_COMP, DSCR_INSTR_COMP, dscr);
268 if (retval != ERROR_OK)
269 LOG_ERROR("Error waiting for InstrCompl=1");
270 return retval;
271 }
272
/* To reduce needless round-trips, pass in a pointer to the current
 * DSCR value. Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_INSTR_COMP if you
 * happen to know that no instruction is pending.
 */
static int cortex_a_exec_opcode(struct target *target,
	uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	/* NOTE(review): when dscr_p is NULL this forwards NULL to
	 * cortex_a_wait_dscr_bits() via cortex_a_wait_instrcmpl() — relies
	 * on that helper tolerating a NULL dscr pointer; verify. */
	retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
	if (retval != ERROR_OK)
		return retval;

	/* issue the opcode through the Instruction Transfer Register */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	/* Wait for InstrCompl bit to be set */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for cortex_a_exec_opcode");
		return retval;
	}

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
311
312 /* Write to memory mapped registers directly with no cache or mmu handling */
313 static int cortex_a_dap_write_memap_register_u32(struct target *target,
314 uint32_t address,
315 uint32_t value)
316 {
317 int retval;
318 struct armv7a_common *armv7a = target_to_armv7a(target);
319
320 retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
321
322 return retval;
323 }
324
325 /*
326 * Cortex-A implementation of Debug Programmer's Model
327 *
328 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
329 * so there's no need to poll for it before executing an instruction.
330 *
331 * NOTE that in several of these cases the "stall" mode might be useful.
332 * It'd let us queue a few operations together... prepare/finish might
333 * be the places to enable/disable that mode.
334 */
335
/* Recover the enclosing Cortex-A container from its embedded DPM. */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
340
/* Push one word toward the core by writing the DTRRX side of the DCC. */
static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
{
	LOG_DEBUG("write DCC 0x%08" PRIx32, data);
	return mem_ap_write_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
}
347
/* Read one word the core has pushed into the DCC (DTRTX side).
 * *dscr_p, when non-NULL, carries the cached DSCR value in and the
 * updated value out, saving a redundant DSCR read. */
static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRTXfull (core -> debugger word available) */
	retval = cortex_a_wait_dscr_bits(a->armv7a_common.arm.target,
			DSCR_DTR_TX_FULL, DSCR_DTR_TX_FULL, &dscr);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for read dcc");
		return retval;
	}

	retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
376
/* DPM "prepare" hook: establish the invariant that DSCR.InstrCompl is
 * set and the DCC receive register is empty before a DPM operation. */
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after ever DPM operation */
	retval = cortex_a_wait_instrcmpl(dpm->arm->target, &dscr, true);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for dpm prepare");
		return retval;
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by having the core read it (into r0) */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
404
/* DPM "finish" hook; Cortex-A has nothing to undo after an operation. */
static int cortex_a_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
410
411 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
412 uint32_t opcode, uint32_t data)
413 {
414 struct cortex_a_common *a = dpm_to_a(dpm);
415 int retval;
416 uint32_t dscr = DSCR_INSTR_COMP;
417
418 retval = cortex_a_write_dcc(a, data);
419 if (retval != ERROR_OK)
420 return retval;
421
422 return cortex_a_exec_opcode(
423 a->armv7a_common.arm.target,
424 opcode,
425 &dscr);
426 }
427
/* Write @data into core register Rt (r0..r15): stage the word in the
 * DCC, then have the core execute an MRC that moves DCC -> Rt. */
static int cortex_a_instr_write_data_rt_dcc(struct arm_dpm *dpm,
	uint8_t rt, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (rt > 15)
		return ERROR_TARGET_INVALID;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to Rt, "MRC p14, 0, Rt, c0, c5, 0" (0xEE100E15 for Rt=r0) */
	return cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, rt, 0, 5, 0),
			&dscr);
}
448
449 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
450 uint32_t opcode, uint32_t data)
451 {
452 struct cortex_a_common *a = dpm_to_a(dpm);
453 uint32_t dscr = DSCR_INSTR_COMP;
454 int retval;
455
456 retval = cortex_a_instr_write_data_rt_dcc(dpm, 0, data);
457 if (retval != ERROR_OK)
458 return retval;
459
460 /* then the opcode, taking data from R0 */
461 retval = cortex_a_exec_opcode(
462 a->armv7a_common.arm.target,
463 opcode,
464 &dscr);
465
466 return retval;
467 }
468
/* Resynchronize the pipeline after a CPSR change by executing a
 * CP15 "prefetch flush" (MCR p15, 0, Rt, c7, c5, 4). */
static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* "Prefetch flush" after modifying execution status in CPSR */
	return cortex_a_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
			&dscr);
}
479
/* Execute @opcode, which is expected to push its result into the DCC,
 * then read that word back into *data. */
static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	int retval;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* the opcode, writing data to DCC */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}
497
/* Read core register Rt (r0..r15) into *data: the core executes an
 * MCR moving Rt -> DCC, then the debugger drains the DCC. */
static int cortex_a_instr_read_data_rt_dcc(struct arm_dpm *dpm,
	uint8_t rt, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (rt > 15)
		return ERROR_TARGET_INVALID;

	/* Rt to DCCTX, "MCR p14, 0, Rt, c0, c5, 0" */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MCR(14, 0, rt, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}
517
518 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
519 uint32_t opcode, uint32_t *data)
520 {
521 struct cortex_a_common *a = dpm_to_a(dpm);
522 uint32_t dscr = DSCR_INSTR_COMP;
523 int retval;
524
525 /* the opcode, writing data to R0 */
526 retval = cortex_a_exec_opcode(
527 a->armv7a_common.arm.target,
528 opcode,
529 &dscr);
530 if (retval != ERROR_OK)
531 return retval;
532
533 /* write R0 to DCC */
534 return cortex_a_instr_read_data_rt_dcc(dpm, 0, data);
535 }
536
/* Arm one hardware breakpoint (index 0..15) or watchpoint (index
 * 16..31) unit: write its value register with @addr, then its control
 * register with @control. */
static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t vr = a->armv7a_common.debug_base;
	uint32_t cr = a->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
		case 0 ... 15:	/* breakpoints */
			vr += CPUDBG_BVR_BASE;
			cr += CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:	/* watchpoints */
			vr += CPUDBG_WVR_BASE;
			cr += CPUDBG_WCR_BASE;
			/* rebase into the watchpoint register file */
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	/* registers are word-indexed within each file */
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	/* write the value register first, then the control register */
	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
572
/* Disarm one hardware breakpoint (index 0..15) or watchpoint (index
 * 16..31) unit by clearing its control register. */
static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t cr;

	switch (index_t) {
		case 0 ... 15:
			cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:
			cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	/* registers are word-indexed within each file */
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);

	/* clear control register */
	return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
}
596
/* Populate the Debug Programmer's Model operation table with the
 * Cortex-A implementations above, then run the generic DPM setup and
 * initialization. */
static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
{
	struct arm_dpm *dpm = &a->armv7a_common.dpm;
	int retval;

	dpm->arm = &a->armv7a_common.arm;
	dpm->didr = didr;

	dpm->prepare = cortex_a_dpm_prepare;
	dpm->finish = cortex_a_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a_bpwp_enable;
	dpm->bpwp_disable = cortex_a_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
624 static struct target *get_cortex_a(struct target *target, int32_t coreid)
625 {
626 struct target_list *head;
627 struct target *curr;
628
629 head = target->head;
630 while (head != (struct target_list *)NULL) {
631 curr = head->target;
632 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
633 return curr;
634 head = head->next;
635 }
636 return target;
637 }
638 static int cortex_a_halt(struct target *target);
639
640 static int cortex_a_halt_smp(struct target *target)
641 {
642 int retval = 0;
643 struct target_list *head;
644 struct target *curr;
645 head = target->head;
646 while (head != (struct target_list *)NULL) {
647 curr = head->target;
648 if ((curr != target) && (curr->state != TARGET_HALTED)
649 && target_was_examined(curr))
650 retval += cortex_a_halt(curr);
651 head = head->next;
652 }
653 return retval;
654 }
655
/* After one SMP core halts, bring the rest of the group to a halted
 * state, refresh each sibling's cached state via poll, and poll the
 * gdb-serving target last so GDB is alerted only once all siblings
 * are up to date. */
static int update_halt_gdb(struct target *target)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;
	int retval = 0;

	/* first halt of the group: claim the gdb service for this core */
	if (target->gdb_service && target->gdb_service->core[0] == -1) {
		target->gdb_service->target = target;
		target->gdb_service->core[0] = target->coreid;
		retval += cortex_a_halt_smp(target);
	}

	if (target->gdb_service)
		gdb_target = target->gdb_service->target;

	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* Skip gdb_target; it alerts GDB so has to be polled as last one */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in cortex_a_poll() */
		curr->smp = 0;
		cortex_a_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target != NULL && gdb_target != target)
		cortex_a_poll(gdb_target);
	return retval;
}
697
698 /*
699 * Cortex-A Run control
700 */
701
/* Poll the core's DSCR and translate its run state into OpenOCD target
 * state, performing debug entry (and SMP group synchronization) on a
 * newly detected halt. */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for later inspection (e.g. debug_entry) */
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;

			retval = cortex_a_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp) {
				retval = update_halt_gdb(target);
				if (retval != ERROR_OK)
					return retval;
			}

			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
			} else { /* prev_target_state is RUNNING, UNKNOWN or RESET */
				/* semihosting may resume the target; if so,
				 * skip the halted-event notification */
				if (arm_semihosting(target, &retval) != 0)
					return retval;

				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
758
/* Request a halt via DRCR and block until DSCR reports the core as
 * halted; on success the debug reason is set to DBG_REASON_DBGRQ. */
static int cortex_a_halt(struct target *target)
{
	int retval;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	dscr = 0;	/* force read of dscr */
	retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_HALTED,
			DSCR_CORE_HALTED, &dscr);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for halt");
		return retval;
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
786
787 static int cortex_a_internal_restore(struct target *target, int current,
788 target_addr_t *address, int handle_breakpoints, int debug_execution)
789 {
790 struct armv7a_common *armv7a = target_to_armv7a(target);
791 struct arm *arm = &armv7a->arm;
792 int retval;
793 uint32_t resume_pc;
794
795 if (!debug_execution)
796 target_free_all_working_areas(target);
797
798 #if 0
799 if (debug_execution) {
800 /* Disable interrupts */
801 /* We disable interrupts in the PRIMASK register instead of
802 * masking with C_MASKINTS,
803 * This is probably the same issue as Cortex-M3 Errata 377493:
804 * C_MASKINTS in parallel with disabled interrupts can cause
805 * local faults to not be taken. */
806 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
807 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = true;
808 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = true;
809
810 /* Make sure we are in Thumb mode */
811 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
812 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
813 32) | (1 << 24));
814 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = true;
815 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = true;
816 }
817 #endif
818
819 /* current = 1: continue on current pc, otherwise continue at <address> */
820 resume_pc = buf_get_u32(arm->pc->value, 0, 32);
821 if (!current)
822 resume_pc = *address;
823 else
824 *address = resume_pc;
825
826 /* Make sure that the Armv7 gdb thumb fixups does not
827 * kill the return address
828 */
829 switch (arm->core_state) {
830 case ARM_STATE_ARM:
831 resume_pc &= 0xFFFFFFFC;
832 break;
833 case ARM_STATE_THUMB:
834 case ARM_STATE_THUMB_EE:
835 /* When the return address is loaded into PC
836 * bit 0 must be 1 to stay in Thumb state
837 */
838 resume_pc |= 0x1;
839 break;
840 case ARM_STATE_JAZELLE:
841 LOG_ERROR("How do I resume into Jazelle state??");
842 return ERROR_FAIL;
843 case ARM_STATE_AARCH64:
844 LOG_ERROR("Shoudn't be in AARCH64 state");
845 return ERROR_FAIL;
846 }
847 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
848 buf_set_u32(arm->pc->value, 0, 32, resume_pc);
849 arm->pc->dirty = true;
850 arm->pc->valid = true;
851
852 /* restore dpm_mode at system halt */
853 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
854 /* called it now before restoring context because it uses cpu
855 * register r0 for restoring cp15 control register */
856 retval = cortex_a_restore_cp15_control_reg(target);
857 if (retval != ERROR_OK)
858 return retval;
859 retval = cortex_a_restore_context(target, handle_breakpoints);
860 if (retval != ERROR_OK)
861 return retval;
862 target->debug_reason = DBG_REASON_NOTHALTED;
863 target->state = TARGET_RUNNING;
864
865 /* registers are now invalid */
866 register_cache_invalidate(arm->core_cache);
867
868 #if 0
869 /* the front-end may request us not to handle breakpoints */
870 if (handle_breakpoints) {
871 /* Single step past breakpoint at current address */
872 breakpoint = breakpoint_find(target, resume_pc);
873 if (breakpoint) {
874 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
875 cortex_m3_unset_breakpoint(target, breakpoint);
876 cortex_m3_single_step_core(target);
877 cortex_m3_set_breakpoint(target, breakpoint);
878 }
879 }
880
881 #endif
882 return retval;
883 }
884
/* Restart a core previously prepared by cortex_a_internal_restore():
 * disable ITR, issue a DRCR restart with sticky exceptions cleared,
 * and wait for DSCR to confirm the core is running again. */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* clear ITRen before restarting, per the sequence above */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	dscr = 0;	/* force read of dscr */
	retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_RESTARTED,
			DSCR_CORE_RESTARTED, &dscr);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for resume");
		return retval;
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
934
935 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
936 {
937 int retval = 0;
938 struct target_list *head;
939 struct target *curr;
940 target_addr_t address;
941 head = target->head;
942 while (head != (struct target_list *)NULL) {
943 curr = head->target;
944 if ((curr != target) && (curr->state != TARGET_RUNNING)
945 && target_was_examined(curr)) {
946 /* resume current address , not in step mode */
947 retval += cortex_a_internal_restore(curr, 1, &address,
948 handle_breakpoints, 0);
949 retval += cortex_a_internal_restart(curr);
950 }
951 head = head->next;
952
953 }
954 return retval;
955 }
956
/* Resume the target (and, in SMP configurations, its sibling cores),
 * firing the appropriate RESUMED/DEBUG_RESUMED event callbacks. */
static int cortex_a_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	/* dummy resume for smp toggle in order to reduce gdb impact */
	if ((target->smp) && (target->gdb_service->core[1] != -1)) {
		/* simulate a start and halt of target */
		target->gdb_service->target = NULL;
		target->gdb_service->core[0] = target->gdb_service->core[1];
		/* fake resume at next poll we play the target core[1], see poll*/
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		return 0;
	}
	cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
	if (target->smp) {
		/* release the gdb service core before restarting the group */
		target->gdb_service->core[0] = -1;
		retval = cortex_a_restore_smp(target, handle_breakpoints);
		if (retval != ERROR_OK)
			return retval;
	}
	cortex_a_internal_restart(target);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
	}

	return ERROR_OK;
}
991
/* Perform the work needed right after the core halts: enable ITR
 * execution, decode the debug reason from DSCR, capture WFAR for
 * watchpoint hits, read the current register set, and run the
 * post_debug_entry hook if installed. */
static int cortex_a_debug_entry(struct target *target)
{
	uint32_t dscr;
	int retval = ERROR_OK;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* First load register accessible through core debug port */
	retval = arm_dpm_read_current_registers(&armv7a->dpm);
	if (retval != ERROR_OK)
		return retval;

	if (arm->spsr) {
		/* read SPSR (register 17 in the DPM numbering) */
		retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
		if (retval != ERROR_OK)
			return retval;
	}

#if 0
/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1070
1071 static int cortex_a_post_debug_entry(struct target *target)
1072 {
1073 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1074 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1075 int retval;
1076
1077 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1078 retval = armv7a->arm.mrc(target, 15,
1079 0, 0, /* op1, op2 */
1080 1, 0, /* CRn, CRm */
1081 &cortex_a->cp15_control_reg);
1082 if (retval != ERROR_OK)
1083 return retval;
1084 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1085 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1086
1087 if (!armv7a->is_armv7r)
1088 armv7a_read_ttbcr(target);
1089
1090 if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1091 armv7a_identify_cache(target);
1092
1093 if (armv7a->is_armv7r) {
1094 armv7a->armv7a_mmu.mmu_enabled = 0;
1095 } else {
1096 armv7a->armv7a_mmu.mmu_enabled =
1097 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1098 }
1099 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1100 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1101 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1102 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1103 cortex_a->curr_mode = armv7a->arm.core_mode;
1104
1105 /* switch to SVC mode to read DACR */
1106 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1107 armv7a->arm.mrc(target, 15,
1108 0, 0, 3, 0,
1109 &cortex_a->cp15_dacr_reg);
1110
1111 LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1112 cortex_a->cp15_dacr_reg);
1113
1114 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1115 return ERROR_OK;
1116 }
1117
1118 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1119 {
1120 struct armv7a_common *armv7a = target_to_armv7a(target);
1121 uint32_t dscr;
1122
1123 /* Read DSCR */
1124 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1125 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1126 if (ERROR_OK != retval)
1127 return retval;
1128
1129 /* clear bitfield */
1130 dscr &= ~bit_mask;
1131 /* put new value */
1132 dscr |= value & bit_mask;
1133
1134 /* write new DSCR */
1135 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1136 armv7a->debug_base + CPUDBG_DSCR, dscr);
1137 return retval;
1138 }
1139
1140 static int cortex_a_step(struct target *target, int current, target_addr_t address,
1141 int handle_breakpoints)
1142 {
1143 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1144 struct armv7a_common *armv7a = target_to_armv7a(target);
1145 struct arm *arm = &armv7a->arm;
1146 struct breakpoint *breakpoint = NULL;
1147 struct breakpoint stepbreakpoint;
1148 struct reg *r;
1149 int retval;
1150
1151 if (target->state != TARGET_HALTED) {
1152 LOG_WARNING("target not halted");
1153 return ERROR_TARGET_NOT_HALTED;
1154 }
1155
1156 /* current = 1: continue on current pc, otherwise continue at <address> */
1157 r = arm->pc;
1158 if (!current)
1159 buf_set_u32(r->value, 0, 32, address);
1160 else
1161 address = buf_get_u32(r->value, 0, 32);
1162
1163 /* The front-end may request us not to handle breakpoints.
1164 * But since Cortex-A uses breakpoint for single step,
1165 * we MUST handle breakpoints.
1166 */
1167 handle_breakpoints = 1;
1168 if (handle_breakpoints) {
1169 breakpoint = breakpoint_find(target, address);
1170 if (breakpoint)
1171 cortex_a_unset_breakpoint(target, breakpoint);
1172 }
1173
1174 /* Setup single step breakpoint */
1175 stepbreakpoint.address = address;
1176 stepbreakpoint.asid = 0;
1177 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1178 ? 2 : 4;
1179 stepbreakpoint.type = BKPT_HARD;
1180 stepbreakpoint.set = 0;
1181
1182 /* Disable interrupts during single step if requested */
1183 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1184 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1185 if (ERROR_OK != retval)
1186 return retval;
1187 }
1188
1189 /* Break on IVA mismatch */
1190 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1191
1192 target->debug_reason = DBG_REASON_SINGLESTEP;
1193
1194 retval = cortex_a_resume(target, 1, address, 0, 0);
1195 if (retval != ERROR_OK)
1196 return retval;
1197
1198 int64_t then = timeval_ms();
1199 while (target->state != TARGET_HALTED) {
1200 retval = cortex_a_poll(target);
1201 if (retval != ERROR_OK)
1202 return retval;
1203 if (target->state == TARGET_HALTED)
1204 break;
1205 if (timeval_ms() > then + 1000) {
1206 LOG_ERROR("timeout waiting for target halt");
1207 return ERROR_FAIL;
1208 }
1209 }
1210
1211 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1212
1213 /* Re-enable interrupts if they were disabled */
1214 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1215 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1216 if (ERROR_OK != retval)
1217 return retval;
1218 }
1219
1220
1221 target->debug_reason = DBG_REASON_BREAKPOINT;
1222
1223 if (breakpoint)
1224 cortex_a_set_breakpoint(target, breakpoint, 0);
1225
1226 if (target->state != TARGET_HALTED)
1227 LOG_DEBUG("target stepped");
1228
1229 return ERROR_OK;
1230 }
1231
1232 static int cortex_a_restore_context(struct target *target, bool bpwp)
1233 {
1234 struct armv7a_common *armv7a = target_to_armv7a(target);
1235
1236 LOG_DEBUG(" ");
1237
1238 if (armv7a->pre_restore_context)
1239 armv7a->pre_restore_context(target);
1240
1241 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1242 }
1243
1244 /*
1245 * Cortex-A Breakpoint and watchpoint functions
1246 */
1247
1248 /* Setup hardware Breakpoint Register Pair */
1249 static int cortex_a_set_breakpoint(struct target *target,
1250 struct breakpoint *breakpoint, uint8_t matchmode)
1251 {
1252 int retval;
1253 int brp_i = 0;
1254 uint32_t control;
1255 uint8_t byte_addr_select = 0x0F;
1256 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1257 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1258 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1259
1260 if (breakpoint->set) {
1261 LOG_WARNING("breakpoint already set");
1262 return ERROR_OK;
1263 }
1264
1265 if (breakpoint->type == BKPT_HARD) {
1266 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1267 brp_i++;
1268 if (brp_i >= cortex_a->brp_num) {
1269 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1270 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1271 }
1272 breakpoint->set = brp_i + 1;
1273 if (breakpoint->length == 2)
1274 byte_addr_select = (3 << (breakpoint->address & 0x02));
1275 control = ((matchmode & 0x7) << 20)
1276 | (byte_addr_select << 5)
1277 | (3 << 1) | 1;
1278 brp_list[brp_i].used = 1;
1279 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1280 brp_list[brp_i].control = control;
1281 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1282 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1283 brp_list[brp_i].value);
1284 if (retval != ERROR_OK)
1285 return retval;
1286 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1287 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1288 brp_list[brp_i].control);
1289 if (retval != ERROR_OK)
1290 return retval;
1291 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1292 brp_list[brp_i].control,
1293 brp_list[brp_i].value);
1294 } else if (breakpoint->type == BKPT_SOFT) {
1295 uint8_t code[4];
1296 /* length == 2: Thumb breakpoint */
1297 if (breakpoint->length == 2)
1298 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1299 else
1300 /* length == 3: Thumb-2 breakpoint, actual encoding is
1301 * a regular Thumb BKPT instruction but we replace a
1302 * 32bit Thumb-2 instruction, so fix-up the breakpoint
1303 * length
1304 */
1305 if (breakpoint->length == 3) {
1306 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1307 breakpoint->length = 4;
1308 } else
1309 /* length == 4, normal ARM breakpoint */
1310 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1311
1312 retval = target_read_memory(target,
1313 breakpoint->address & 0xFFFFFFFE,
1314 breakpoint->length, 1,
1315 breakpoint->orig_instr);
1316 if (retval != ERROR_OK)
1317 return retval;
1318
1319 /* make sure data cache is cleaned & invalidated down to PoC */
1320 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1321 armv7a_cache_flush_virt(target, breakpoint->address,
1322 breakpoint->length);
1323 }
1324
1325 retval = target_write_memory(target,
1326 breakpoint->address & 0xFFFFFFFE,
1327 breakpoint->length, 1, code);
1328 if (retval != ERROR_OK)
1329 return retval;
1330
1331 /* update i-cache at breakpoint location */
1332 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1333 breakpoint->length);
1334 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1335 breakpoint->length);
1336
1337 breakpoint->set = 0x11; /* Any nice value but 0 */
1338 }
1339
1340 return ERROR_OK;
1341 }
1342
1343 static int cortex_a_set_context_breakpoint(struct target *target,
1344 struct breakpoint *breakpoint, uint8_t matchmode)
1345 {
1346 int retval = ERROR_FAIL;
1347 int brp_i = 0;
1348 uint32_t control;
1349 uint8_t byte_addr_select = 0x0F;
1350 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1351 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1352 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1353
1354 if (breakpoint->set) {
1355 LOG_WARNING("breakpoint already set");
1356 return retval;
1357 }
1358 /*check available context BRPs*/
1359 while ((brp_list[brp_i].used ||
1360 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1361 brp_i++;
1362
1363 if (brp_i >= cortex_a->brp_num) {
1364 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1365 return ERROR_FAIL;
1366 }
1367
1368 breakpoint->set = brp_i + 1;
1369 control = ((matchmode & 0x7) << 20)
1370 | (byte_addr_select << 5)
1371 | (3 << 1) | 1;
1372 brp_list[brp_i].used = 1;
1373 brp_list[brp_i].value = (breakpoint->asid);
1374 brp_list[brp_i].control = control;
1375 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1376 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1377 brp_list[brp_i].value);
1378 if (retval != ERROR_OK)
1379 return retval;
1380 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1381 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1382 brp_list[brp_i].control);
1383 if (retval != ERROR_OK)
1384 return retval;
1385 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1386 brp_list[brp_i].control,
1387 brp_list[brp_i].value);
1388 return ERROR_OK;
1389
1390 }
1391
1392 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1393 {
1394 int retval = ERROR_FAIL;
1395 int brp_1 = 0; /* holds the contextID pair */
1396 int brp_2 = 0; /* holds the IVA pair */
1397 uint32_t control_CTX, control_IVA;
1398 uint8_t CTX_byte_addr_select = 0x0F;
1399 uint8_t IVA_byte_addr_select = 0x0F;
1400 uint8_t CTX_machmode = 0x03;
1401 uint8_t IVA_machmode = 0x01;
1402 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1403 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1404 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1405
1406 if (breakpoint->set) {
1407 LOG_WARNING("breakpoint already set");
1408 return retval;
1409 }
1410 /*check available context BRPs*/
1411 while ((brp_list[brp_1].used ||
1412 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1413 brp_1++;
1414
1415 printf("brp(CTX) found num: %d\n", brp_1);
1416 if (brp_1 >= cortex_a->brp_num) {
1417 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1418 return ERROR_FAIL;
1419 }
1420
1421 while ((brp_list[brp_2].used ||
1422 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1423 brp_2++;
1424
1425 printf("brp(IVA) found num: %d\n", brp_2);
1426 if (brp_2 >= cortex_a->brp_num) {
1427 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1428 return ERROR_FAIL;
1429 }
1430
1431 breakpoint->set = brp_1 + 1;
1432 breakpoint->linked_BRP = brp_2;
1433 control_CTX = ((CTX_machmode & 0x7) << 20)
1434 | (brp_2 << 16)
1435 | (0 << 14)
1436 | (CTX_byte_addr_select << 5)
1437 | (3 << 1) | 1;
1438 brp_list[brp_1].used = 1;
1439 brp_list[brp_1].value = (breakpoint->asid);
1440 brp_list[brp_1].control = control_CTX;
1441 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1442 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1443 brp_list[brp_1].value);
1444 if (retval != ERROR_OK)
1445 return retval;
1446 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1447 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1448 brp_list[brp_1].control);
1449 if (retval != ERROR_OK)
1450 return retval;
1451
1452 control_IVA = ((IVA_machmode & 0x7) << 20)
1453 | (brp_1 << 16)
1454 | (IVA_byte_addr_select << 5)
1455 | (3 << 1) | 1;
1456 brp_list[brp_2].used = 1;
1457 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1458 brp_list[brp_2].control = control_IVA;
1459 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1460 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1461 brp_list[brp_2].value);
1462 if (retval != ERROR_OK)
1463 return retval;
1464 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1465 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1466 brp_list[brp_2].control);
1467 if (retval != ERROR_OK)
1468 return retval;
1469
1470 return ERROR_OK;
1471 }
1472
/*
 * Tear down a previously-set breakpoint.
 *
 * Hard breakpoints: clears and writes back the claimed BRP (both the
 * linked pair for hybrid breakpoints, detected by a non-zero address
 * AND asid). Soft breakpoints: restores the saved original instruction
 * and performs the matching cache maintenance.
 *
 * Returns ERROR_OK (including for bogus BRP numbers, which are only
 * logged) or a transport error from the register writes.
 */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* hybrid breakpoint: both address and asid were programmed,
		 * so two BRPs (the context/IVA linked pair) must be released */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;	/* set stores BRP index + 1 */
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* write BCR first: disabling the comparator before
			 * clearing its value register */
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: release the single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
					breakpoint->length);
		}

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
				breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1588
1589 static int cortex_a_add_breakpoint(struct target *target,
1590 struct breakpoint *breakpoint)
1591 {
1592 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1593
1594 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1595 LOG_INFO("no hardware breakpoint available");
1596 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1597 }
1598
1599 if (breakpoint->type == BKPT_HARD)
1600 cortex_a->brp_num_available--;
1601
1602 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1603 }
1604
1605 static int cortex_a_add_context_breakpoint(struct target *target,
1606 struct breakpoint *breakpoint)
1607 {
1608 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1609
1610 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1611 LOG_INFO("no hardware breakpoint available");
1612 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1613 }
1614
1615 if (breakpoint->type == BKPT_HARD)
1616 cortex_a->brp_num_available--;
1617
1618 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1619 }
1620
1621 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1622 struct breakpoint *breakpoint)
1623 {
1624 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1625
1626 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1627 LOG_INFO("no hardware breakpoint available");
1628 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1629 }
1630
1631 if (breakpoint->type == BKPT_HARD)
1632 cortex_a->brp_num_available--;
1633
1634 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1635 }
1636
1637
1638 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1639 {
1640 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1641
1642 #if 0
1643 /* It is perfectly possible to remove breakpoints while the target is running */
1644 if (target->state != TARGET_HALTED) {
1645 LOG_WARNING("target not halted");
1646 return ERROR_TARGET_NOT_HALTED;
1647 }
1648 #endif
1649
1650 if (breakpoint->set) {
1651 cortex_a_unset_breakpoint(target, breakpoint);
1652 if (breakpoint->type == BKPT_HARD)
1653 cortex_a->brp_num_available++;
1654 }
1655
1656
1657 return ERROR_OK;
1658 }
1659
1660 /*
1661 * Cortex-A Reset functions
1662 */
1663
/*
 * target_type hook: drive the target into reset.
 *
 * Prefers a board-defined TARGET_EVENT_RESET_ASSERT handler; otherwise
 * pulses SRST through the adapter when the reset config allows it.
 * Callable before the target has been examined; register caches are
 * only invalidated when examination already happened.
 */
static int cortex_a_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* This function can be called in "target not examined" state */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */

		/*
		 * FIXME: fix reset when transport is SWD. This is a temporary
		 * work-around for release v0.10 that is not intended to stay!
		 */
		if (transport_is_swd() ||
				(target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
			adapter_assert_reset();

	} else {
		/* no event handler and no SRST line: nothing we can do */
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target))
		register_cache_invalidate(armv7a->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1703
1704 static int cortex_a_deassert_reset(struct target *target)
1705 {
1706 int retval;
1707
1708 LOG_DEBUG(" ");
1709
1710 /* be certain SRST is off */
1711 adapter_deassert_reset();
1712
1713 if (target_was_examined(target)) {
1714 retval = cortex_a_poll(target);
1715 if (retval != ERROR_OK)
1716 return retval;
1717 }
1718
1719 if (target->reset_halt) {
1720 if (target->state != TARGET_HALTED) {
1721 LOG_WARNING("%s: ran after reset and before halt ...",
1722 target_name(target));
1723 if (target_was_examined(target)) {
1724 retval = target_halt(target);
1725 if (retval != ERROR_OK)
1726 return retval;
1727 } else
1728 target->state = TARGET_UNKNOWN;
1729 }
1730 }
1731
1732 return ERROR_OK;
1733 }
1734
1735 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1736 {
1737 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1738 * New desired mode must be in mode. Current value of DSCR must be in
1739 * *dscr, which is updated with new value.
1740 *
1741 * This function elides actually sending the mode-change over the debug
1742 * interface if the mode is already set as desired.
1743 */
1744 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1745 if (new_dscr != *dscr) {
1746 struct armv7a_common *armv7a = target_to_armv7a(target);
1747 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1748 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1749 if (retval == ERROR_OK)
1750 *dscr = new_dscr;
1751 return retval;
1752 } else {
1753 return ERROR_OK;
1754 }
1755 }
1756
1757 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1758 uint32_t value, uint32_t *dscr)
1759 {
1760 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1761 struct armv7a_common *armv7a = target_to_armv7a(target);
1762 int64_t then;
1763 int retval;
1764
1765 if ((*dscr & mask) == value)
1766 return ERROR_OK;
1767
1768 then = timeval_ms();
1769 while (1) {
1770 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1771 armv7a->debug_base + CPUDBG_DSCR, dscr);
1772 if (retval != ERROR_OK) {
1773 LOG_ERROR("Could not read DSCR register");
1774 return retval;
1775 }
1776 if ((*dscr & mask) == value)
1777 break;
1778 if (timeval_ms() > then + 1000) {
1779 LOG_ERROR("timeout waiting for DSCR bit change");
1780 return ERROR_FAIL;
1781 }
1782 }
1783 return ERROR_OK;
1784 }
1785
/*
 * Read a coprocessor register: execute the caller-supplied MRC opcode
 * (which must target R0), marshal R0 into DTRTX, then fetch the value
 * over the debug bus. Clobbers R0 on the core; the caller is expected
 * to have marked it dirty.
 */
static int cortex_a_read_copro(struct target *target, uint32_t opcode,
	uint32_t *data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Move from coprocessor to R0. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
	 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
	 * must also check TXfull_l). Most of the time this will be free
	 * because TXfull_l will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
1819
1820 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
1821 uint32_t *dfsr, uint32_t *dscr)
1822 {
1823 int retval;
1824
1825 if (dfar) {
1826 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
1827 if (retval != ERROR_OK)
1828 return retval;
1829 }
1830
1831 if (dfsr) {
1832 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
1833 if (retval != ERROR_OK)
1834 return retval;
1835 }
1836
1837 return ERROR_OK;
1838 }
1839
/*
 * Write a coprocessor register: place the value in DTRRX, pull it into
 * R0 on the core, then execute the caller-supplied MCR opcode (which
 * must source R0). Clobbers R0; the caller is expected to have marked
 * it dirty.
 */
static int cortex_a_write_copro(struct target *target, uint32_t opcode,
	uint32_t data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Write the value into DTRRX. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, data);
	if (retval != ERROR_OK)
		return retval;

	/* Move from DTRRX to R0. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to coprocessor. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
1872
1873 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
1874 uint32_t dfsr, uint32_t *dscr)
1875 {
1876 int retval;
1877
1878 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
1879 if (retval != ERROR_OK)
1880 return retval;
1881
1882 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
1883 if (retval != ERROR_OK)
1884 return retval;
1885
1886 return ERROR_OK;
1887 }
1888
1889 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
1890 {
1891 uint32_t status, upper4;
1892
1893 if (dfsr & (1 << 9)) {
1894 /* LPAE format. */
1895 status = dfsr & 0x3f;
1896 upper4 = status >> 2;
1897 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
1898 return ERROR_TARGET_TRANSLATION_FAULT;
1899 else if (status == 33)
1900 return ERROR_TARGET_UNALIGNED_ACCESS;
1901 else
1902 return ERROR_TARGET_DATA_ABORT;
1903 } else {
1904 /* Normal format. */
1905 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
1906 if (status == 1)
1907 return ERROR_TARGET_UNALIGNED_ACCESS;
1908 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
1909 status == 9 || status == 11 || status == 13 || status == 15)
1910 return ERROR_TARGET_TRANSLATION_FAULT;
1911 else
1912 return ERROR_TARGET_DATA_ABORT;
1913 }
1914 }
1915
static int cortex_a_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size size from *buffer. Old value of DSCR must
	 * be in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects. Avoid unaligned accesses as they do not work
	 * on memory address space without "Normal" attribute. If size == 4 and
	 * the address is aligned, cortex_a_write_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Write the value to store into DTRRX. */
		uint32_t data, opcode;
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* Transfer the value from DTRRX to R1. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Write the value transferred to R1 into memory, using a
		 * size-matched store with post-increment of the address in R0. */
		if (size == 1)
			opcode = ARMV4_5_STRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_STRH_IP(1, 0);
		else
			opcode = ARMV4_5_STRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check RXfull_l). Most of the time this will be free
		 * because RXfull_l will be cleared immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
1991
static int cortex_a_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size 4 from *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is fast but only works for
	 * word-sized objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	/* Switch to fast mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Latch STC instruction. In fast mode the latched opcode is
	 * re-executed by the core on every DTRRX write, so a single
	 * ITR write here drives the whole burst below. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
	if (retval != ERROR_OK)
		return retval;

	/* Transfer all the data and issue all the instructions. */
	return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
			4, count, armv7a->debug_base + CPUDBG_DTRRX);
}
2020
2021 static int cortex_a_write_cpu_memory(struct target *target,
2022 uint32_t address, uint32_t size,
2023 uint32_t count, const uint8_t *buffer)
2024 {
2025 /* Write memory through the CPU. */
2026 int retval, final_retval;
2027 struct armv7a_common *armv7a = target_to_armv7a(target);
2028 struct arm *arm = &armv7a->arm;
2029 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2030
2031 LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2032 address, size, count);
2033 if (target->state != TARGET_HALTED) {
2034 LOG_WARNING("target not halted");
2035 return ERROR_TARGET_NOT_HALTED;
2036 }
2037
2038 if (!count)
2039 return ERROR_OK;
2040
2041 /* Clear any abort. */
2042 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2043 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2044 if (retval != ERROR_OK)
2045 return retval;
2046
2047 /* Read DSCR. */
2048 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2049 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2050 if (retval != ERROR_OK)
2051 return retval;
2052
2053 /* Switch to non-blocking mode if not already in that mode. */
2054 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2055 if (retval != ERROR_OK)
2056 goto out;
2057
2058 /* Mark R0 as dirty. */
2059 arm_reg_current(arm, 0)->dirty = true;
2060
2061 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2062 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2063 if (retval != ERROR_OK)
2064 goto out;
2065
2066 /* Get the memory address into R0. */
2067 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2068 armv7a->debug_base + CPUDBG_DTRRX, address);
2069 if (retval != ERROR_OK)
2070 goto out;
2071 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2072 if (retval != ERROR_OK)
2073 goto out;
2074
2075 if (size == 4 && (address % 4) == 0) {
2076 /* We are doing a word-aligned transfer, so use fast mode. */
2077 retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2078 } else {
2079 /* Use slow path. Adjust size for aligned accesses */
2080 switch (address % 4) {
2081 case 1:
2082 case 3:
2083 count *= size;
2084 size = 1;
2085 break;
2086 case 2:
2087 if (size == 4) {
2088 count *= 2;
2089 size = 2;
2090 }
2091 case 0:
2092 default:
2093 break;
2094 }
2095 retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2096 }
2097
2098 out:
2099 final_retval = retval;
2100
2101 /* Switch to non-blocking mode if not already in that mode. */
2102 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2103 if (final_retval == ERROR_OK)
2104 final_retval = retval;
2105
2106 /* Wait for last issued instruction to complete. */
2107 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2108 if (final_retval == ERROR_OK)
2109 final_retval = retval;
2110
2111 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2112 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2113 * check RXfull_l). Most of the time this will be free because RXfull_l
2114 * will be cleared immediately and cached in dscr. However, don't do this
2115 * if there is fault, because then the instruction might not have completed
2116 * successfully. */
2117 if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2118 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2119 if (retval != ERROR_OK)
2120 return retval;
2121 }
2122
2123 /* If there were any sticky abort flags, clear them. */
2124 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2125 fault_dscr = dscr;
2126 mem_ap_write_atomic_u32(armv7a->debug_ap,
2127 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2128 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2129 } else {
2130 fault_dscr = 0;
2131 }
2132
2133 /* Handle synchronous data faults. */
2134 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2135 if (final_retval == ERROR_OK) {
2136 /* Final return value will reflect cause of fault. */
2137 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2138 if (retval == ERROR_OK) {
2139 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2140 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2141 } else
2142 final_retval = retval;
2143 }
2144 /* Fault destroyed DFAR/DFSR; restore them. */
2145 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2146 if (retval != ERROR_OK)
2147 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2148 }
2149
2150 /* Handle asynchronous data faults. */
2151 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2152 if (final_retval == ERROR_OK)
2153 /* No other error has been recorded so far, so keep this one. */
2154 final_retval = ERROR_TARGET_DATA_ABORT;
2155 }
2156
2157 /* If the DCC is nonempty, clear it. */
2158 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2159 uint32_t dummy;
2160 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2161 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2162 if (final_retval == ERROR_OK)
2163 final_retval = retval;
2164 }
2165 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2166 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2167 if (final_retval == ERROR_OK)
2168 final_retval = retval;
2169 }
2170
2171 /* Done. */
2172 return final_retval;
2173 }
2174
2175 static int cortex_a_read_cpu_memory_slow(struct target *target,
2176 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2177 {
2178 /* Reads count objects of size size into *buffer. Old value of DSCR must be
2179 * in *dscr; updated to new value. This is slow because it works for
2180 * non-word-sized objects. Avoid unaligned accesses as they do not work
2181 * on memory address space without "Normal" attribute. If size == 4 and
2182 * the address is aligned, cortex_a_read_cpu_memory_fast should be
2183 * preferred.
2184 * Preconditions:
2185 * - Address is in R0.
2186 * - R0 is marked dirty.
2187 */
2188 struct armv7a_common *armv7a = target_to_armv7a(target);
2189 struct arm *arm = &armv7a->arm;
2190 int retval;
2191
2192 /* Mark register R1 as dirty, to use for transferring data. */
2193 arm_reg_current(arm, 1)->dirty = true;
2194
2195 /* Switch to non-blocking mode if not already in that mode. */
2196 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2197 if (retval != ERROR_OK)
2198 return retval;
2199
2200 /* Go through the objects. */
2201 while (count) {
2202 /* Issue a load of the appropriate size to R1. */
2203 uint32_t opcode, data;
2204 if (size == 1)
2205 opcode = ARMV4_5_LDRB_IP(1, 0);
2206 else if (size == 2)
2207 opcode = ARMV4_5_LDRH_IP(1, 0);
2208 else
2209 opcode = ARMV4_5_LDRW_IP(1, 0);
2210 retval = cortex_a_exec_opcode(target, opcode, dscr);
2211 if (retval != ERROR_OK)
2212 return retval;
2213
2214 /* Issue a write of R1 to DTRTX. */
2215 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2216 if (retval != ERROR_OK)
2217 return retval;
2218
2219 /* Check for faults and return early. */
2220 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2221 return ERROR_OK; /* A data fault is not considered a system failure. */
2222
2223 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2224 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2225 * must also check TXfull_l). Most of the time this will be free
2226 * because TXfull_l will be set immediately and cached in dscr. */
2227 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2228 DSCR_DTRTX_FULL_LATCHED, dscr);
2229 if (retval != ERROR_OK)
2230 return retval;
2231
2232 /* Read the value transferred to DTRTX into the buffer. */
2233 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2234 armv7a->debug_base + CPUDBG_DTRTX, &data);
2235 if (retval != ERROR_OK)
2236 return retval;
2237 if (size == 1)
2238 *buffer = (uint8_t) data;
2239 else if (size == 2)
2240 target_buffer_set_u16(target, buffer, (uint16_t) data);
2241 else
2242 target_buffer_set_u32(target, buffer, data);
2243
2244 /* Advance. */
2245 buffer += size;
2246 --count;
2247 }
2248
2249 return ERROR_OK;
2250 }
2251
static int cortex_a_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
	 * *dscr; updated to new value. This is fast but only works for word-sized
	 * objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t u32;
	int retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Issue the LDC instruction via a write to ITR. This fetches the first
	 * word from memory into DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* One word is already in flight from the LDC above; the remaining
	 * count-1 words are streamed in fast mode below. */
	count--;

	if (count > 0) {
		/* Switch to fast mode if not already in that mode. */
		retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Latch LDC instruction: in fast mode each DTRTX read re-issues it. */
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. Due to fast
		 * mode rules, this blocks until the instruction finishes executing and
		 * then reissues the read instruction to read the next word from
		 * memory. The last read of DTRTX in this call reads the second-to-last
		 * word from memory and issues the read instruction for the last word.
		 */
		retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
			4, count, armv7a->debug_base + CPUDBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;

		/* Advance past the words consumed above; the final word is handled
		 * separately below. */
		buffer += count * 4;
	}

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, dscr, false);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Check for faults and return early. */
	if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
		return ERROR_OK; /* A data fault is not considered a system failure. */

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check TXfull_l). Most of the time this will be free because TXfull_l
	 * will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
		DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX into the buffer. This is the last
	 * word. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DTRTX, &u32);
	if (retval != ERROR_OK)
		return retval;
	target_buffer_set_u32(target, buffer, u32);

	return ERROR_OK;
}
2338
static int cortex_a_read_cpu_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through the CPU using the DCC (debug comms channel).
	 * Dispatches to the fast helper for word-aligned word transfers, or
	 * the slow helper otherwise; afterwards waits for completion, converts
	 * any data abort into an error code and restores DFAR/DFSR. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
		address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort left over from a previous operation. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty; it carries the target address below. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0: write DTRRX, then MRC it into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. Adjust size so every access is naturally aligned. */
		switch (address % 4) {
			case 1:
			case 3:
				count *= size;
				size = 1;
				break;
			case 2:
				if (size == 4) {
					count *= 2;
					size = 2;
				}
				break;
			case 0:
			default:
				break;
		}
		retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
	}

out:
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		/* Drain a stale word left in DTRTX. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		/* Consume a stale DTRRX word by moving it into (dirty) R1. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2481
2482
2483 /*
2484 * Cortex-A Memory access
2485 *
2486 * This is same Cortex-M3 but we must also use the correct
2487 * ap number for every access.
2488 */
2489
2490 static int cortex_a_read_phys_memory(struct target *target,
2491 target_addr_t address, uint32_t size,
2492 uint32_t count, uint8_t *buffer)
2493 {
2494 int retval;
2495
2496 if (!count || !buffer)
2497 return ERROR_COMMAND_SYNTAX_ERROR;
2498
2499 LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2500 address, size, count);
2501
2502 /* read memory through the CPU */
2503 cortex_a_prep_memaccess(target, 1);
2504 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2505 cortex_a_post_memaccess(target, 1);
2506
2507 return retval;
2508 }
2509
2510 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2511 uint32_t size, uint32_t count, uint8_t *buffer)
2512 {
2513 int retval;
2514
2515 /* cortex_a handles unaligned memory access */
2516 LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2517 address, size, count);
2518
2519 cortex_a_prep_memaccess(target, 0);
2520 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2521 cortex_a_post_memaccess(target, 0);
2522
2523 return retval;
2524 }
2525
2526 static int cortex_a_write_phys_memory(struct target *target,
2527 target_addr_t address, uint32_t size,
2528 uint32_t count, const uint8_t *buffer)
2529 {
2530 int retval;
2531
2532 if (!count || !buffer)
2533 return ERROR_COMMAND_SYNTAX_ERROR;
2534
2535 LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2536 address, size, count);
2537
2538 /* write memory through the CPU */
2539 cortex_a_prep_memaccess(target, 1);
2540 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2541 cortex_a_post_memaccess(target, 1);
2542
2543 return retval;
2544 }
2545
2546 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2547 uint32_t size, uint32_t count, const uint8_t *buffer)
2548 {
2549 int retval;
2550
2551 /* cortex_a handles unaligned memory access */
2552 LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2553 address, size, count);
2554
2555 /* memory writes bypass the caches, must flush before writing */
2556 armv7a_cache_auto_flush_on_write(target, address, size * count);
2557
2558 cortex_a_prep_memaccess(target, 0);
2559 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2560 cortex_a_post_memaccess(target, 0);
2561 return retval;
2562 }
2563
2564 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2565 uint32_t count, uint8_t *buffer)
2566 {
2567 uint32_t size;
2568
2569 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2570 * will have something to do with the size we leave to it. */
2571 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2572 if (address & size) {
2573 int retval = target_read_memory(target, address, size, 1, buffer);
2574 if (retval != ERROR_OK)
2575 return retval;
2576 address += size;
2577 count -= size;
2578 buffer += size;
2579 }
2580 }
2581
2582 /* Read the data with as large access size as possible. */
2583 for (; size > 0; size /= 2) {
2584 uint32_t aligned = count - count % size;
2585 if (aligned > 0) {
2586 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2587 if (retval != ERROR_OK)
2588 return retval;
2589 address += aligned;
2590 count -= aligned;
2591 buffer += aligned;
2592 }
2593 }
2594
2595 return ERROR_OK;
2596 }
2597
2598 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2599 uint32_t count, const uint8_t *buffer)
2600 {
2601 uint32_t size;
2602
2603 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2604 * will have something to do with the size we leave to it. */
2605 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2606 if (address & size) {
2607 int retval = target_write_memory(target, address, size, 1, buffer);
2608 if (retval != ERROR_OK)
2609 return retval;
2610 address += size;
2611 count -= size;
2612 buffer += size;
2613 }
2614 }
2615
2616 /* Write the data with as large access size as possible. */
2617 for (; size > 0; size /= 2) {
2618 uint32_t aligned = count - count % size;
2619 if (aligned > 0) {
2620 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2621 if (retval != ERROR_OK)
2622 return retval;
2623 address += aligned;
2624 count -= aligned;
2625 buffer += aligned;
2626 }
2627 }
2628
2629 return ERROR_OK;
2630 }
2631
2632 static int cortex_a_handle_target_request(void *priv)
2633 {
2634 struct target *target = priv;
2635 struct armv7a_common *armv7a = target_to_armv7a(target);
2636 int retval;
2637
2638 if (!target_was_examined(target))
2639 return ERROR_OK;
2640 if (!target->dbg_msg_enabled)
2641 return ERROR_OK;
2642
2643 if (target->state == TARGET_RUNNING) {
2644 uint32_t request;
2645 uint32_t dscr;
2646 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2647 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2648
2649 /* check if we have data */
2650 int64_t then = timeval_ms();
2651 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2652 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2653 armv7a->debug_base + CPUDBG_DTRTX, &request);
2654 if (retval == ERROR_OK) {
2655 target_request(target, request);
2656 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2657 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2658 }
2659 if (timeval_ms() > then + 1000) {
2660 LOG_ERROR("Timeout waiting for dtr tx full");
2661 return ERROR_FAIL;
2662 }
2663 }
2664 }
2665
2666 return ERROR_OK;
2667 }
2668
2669 /*
2670 * Cortex-A target information and configuration
2671 */
2672
2673 static int cortex_a_examine_first(struct target *target)
2674 {
2675 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2676 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2677 struct adiv5_dap *swjdp = armv7a->arm.dap;
2678
2679 int i;
2680 int retval = ERROR_OK;
2681 uint32_t didr, cpuid, dbg_osreg;
2682
2683 /* Search for the APB-AP - it is needed for access to debug registers */
2684 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2685 if (retval != ERROR_OK) {
2686 LOG_ERROR("Could not find APB-AP for debug access");
2687 return retval;
2688 }
2689
2690 retval = mem_ap_init(armv7a->debug_ap);
2691 if (retval != ERROR_OK) {
2692 LOG_ERROR("Could not initialize the APB-AP");
2693 return retval;
2694 }
2695
2696 armv7a->debug_ap->memaccess_tck = 80;
2697
2698 if (!target->dbgbase_set) {
2699 uint32_t dbgbase;
2700 /* Get ROM Table base */
2701 uint32_t apid;
2702 int32_t coreidx = target->coreid;
2703 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2704 target->cmd_name);
2705 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
2706 if (retval != ERROR_OK)
2707 return retval;
2708 /* Lookup 0x15 -- Processor DAP */
2709 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
2710 &armv7a->debug_base, &coreidx);
2711 if (retval != ERROR_OK) {
2712 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2713 target->cmd_name);
2714 return retval;
2715 }
2716 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2717 target->coreid, armv7a->debug_base);
2718 } else
2719 armv7a->debug_base = target->dbgbase;
2720
2721 if ((armv7a->debug_base & (1UL<<31)) == 0)
2722 LOG_WARNING("Debug base address for target %s has bit 31 set to 0. Access to debug registers will likely fail!\n"
2723 "Please fix the target configuration.", target_name(target));
2724
2725 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2726 armv7a->debug_base + CPUDBG_DIDR, &didr);
2727 if (retval != ERROR_OK) {
2728 LOG_DEBUG("Examine %s failed", "DIDR");
2729 return retval;
2730 }
2731
2732 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2733 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2734 if (retval != ERROR_OK) {
2735 LOG_DEBUG("Examine %s failed", "CPUID");
2736 return retval;
2737 }
2738
2739 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2740 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2741
2742 cortex_a->didr = didr;
2743 cortex_a->cpuid = cpuid;
2744
2745 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2746 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2747 if (retval != ERROR_OK)
2748 return retval;
2749 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
2750
2751 if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
2752 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
2753 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2754 return ERROR_TARGET_INIT_FAILED;
2755 }
2756
2757 if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
2758 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
2759
2760 /* Read DBGOSLSR and check if OSLK is implemented */
2761 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2762 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2763 if (retval != ERROR_OK)
2764 return retval;
2765 LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
2766
2767 /* check if OS Lock is implemented */
2768 if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
2769 /* check if OS Lock is set */
2770 if (dbg_osreg & OSLSR_OSLK) {
2771 LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
2772
2773 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2774 armv7a->debug_base + CPUDBG_OSLAR,
2775 0);
2776 if (retval == ERROR_OK)
2777 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2778 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2779
2780 /* if we fail to access the register or cannot reset the OSLK bit, bail out */
2781 if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
2782 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
2783 target->coreid);
2784 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2785 return ERROR_TARGET_INIT_FAILED;
2786 }
2787 }
2788 }
2789
2790 armv7a->arm.core_type = ARM_MODE_MON;
2791
2792 /* Avoid recreating the registers cache */
2793 if (!target_was_examined(target)) {
2794 retval = cortex_a_dpm_setup(cortex_a, didr);
2795 if (retval != ERROR_OK)
2796 return retval;
2797 }
2798
2799 /* Setup Breakpoint Register Pairs */
2800 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
2801 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2802 cortex_a->brp_num_available = cortex_a->brp_num;
2803 free(cortex_a->brp_list);
2804 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
2805 /* cortex_a->brb_enabled = ????; */
2806 for (i = 0; i < cortex_a->brp_num; i++) {
2807 cortex_a->brp_list[i].used = 0;
2808 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
2809 cortex_a->brp_list[i].type = BRP_NORMAL;
2810 else
2811 cortex_a->brp_list[i].type = BRP_CONTEXT;
2812 cortex_a->brp_list[i].value = 0;
2813 cortex_a->brp_list[i].control = 0;
2814 cortex_a->brp_list[i].BRPn = i;
2815 }
2816
2817 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
2818
2819 /* select debug_ap as default */
2820 swjdp->apsel = armv7a->debug_ap->ap_num;
2821
2822 target_set_examined(target);
2823 return ERROR_OK;
2824 }
2825
2826 static int cortex_a_examine(struct target *target)
2827 {
2828 int retval = ERROR_OK;
2829
2830 /* Reestablish communication after target reset */
2831 retval = cortex_a_examine_first(target);
2832
2833 /* Configure core debug access */
2834 if (retval == ERROR_OK)
2835 retval = cortex_a_init_debug_access(target);
2836
2837 return retval;
2838 }
2839
2840 /*
2841 * Cortex-A target creation and initialization
2842 */
2843
static int cortex_a_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* Late target-init hook; examine_first() does most of the setup.
	 * Here we only wire up ARM semihosting support. */
	arm_semihosting_init(target);
	return ERROR_OK;
}
2851
2852 static int cortex_a_init_arch_info(struct target *target,
2853 struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
2854 {
2855 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2856
2857 /* Setup struct cortex_a_common */
2858 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2859 armv7a->arm.dap = dap;
2860
2861 /* register arch-specific functions */
2862 armv7a->examine_debug_reason = NULL;
2863
2864 armv7a->post_debug_entry = cortex_a_post_debug_entry;
2865
2866 armv7a->pre_restore_context = NULL;
2867
2868 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
2869
2870
2871 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
2872
2873 /* REVISIT v7a setup should be in a v7a-specific routine */
2874 armv7a_init_arch_info(target, armv7a);
2875 target_register_timer_callback(cortex_a_handle_target_request, 1,
2876 TARGET_TIMER_TYPE_PERIODIC, target);
2877
2878 return ERROR_OK;
2879 }
2880
2881 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
2882 {
2883 struct cortex_a_common *cortex_a;
2884 struct adiv5_private_config *pc;
2885
2886 if (target->private_config == NULL)
2887 return ERROR_FAIL;
2888
2889 pc = (struct adiv5_private_config *)target->private_config;
2890
2891 cortex_a = calloc(1, sizeof(struct cortex_a_common));
2892 if (cortex_a == NULL) {
2893 LOG_ERROR("Out of memory");
2894 return ERROR_FAIL;
2895 }
2896 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2897 cortex_a->armv7a_common.is_armv7r = false;
2898 cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
2899
2900 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
2901 }
2902
2903 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
2904 {
2905 struct cortex_a_common *cortex_a;
2906 struct adiv5_private_config *pc;
2907
2908 pc = (struct adiv5_private_config *)target->private_config;
2909 if (adiv5_verify_config(pc) != ERROR_OK)
2910 return ERROR_FAIL;
2911
2912 cortex_a = calloc(1, sizeof(struct cortex_a_common));
2913 if (cortex_a == NULL) {
2914 LOG_ERROR("Out of memory");
2915 return ERROR_FAIL;
2916 }
2917 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2918 cortex_a->armv7a_common.is_armv7r = true;
2919
2920 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
2921 }
2922
2923 static void cortex_a_deinit_target(struct target *target)
2924 {
2925 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2926 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2927 struct arm_dpm *dpm = &armv7a->dpm;
2928 uint32_t dscr;
2929 int retval;
2930
2931 if (target_was_examined(target)) {
2932 /* Disable halt for breakpoint, watchpoint and vector catch */
2933 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2934 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2935 if (retval == ERROR_OK)
2936 mem_ap_write_atomic_u32(armv7a->debug_ap,
2937 armv7a->debug_base + CPUDBG_DSCR,
2938 dscr & ~DSCR_HALT_DBG_MODE);
2939 }
2940
2941 free(cortex_a->brp_list);
2942 free(dpm->dbp);
2943 free(dpm->dwp);
2944 free(target->private_config);
2945 free(cortex_a);
2946 }
2947
2948 static int cortex_a_mmu(struct target *target, int *enabled)
2949 {
2950 struct armv7a_common *armv7a = target_to_armv7a(target);
2951
2952 if (target->state != TARGET_HALTED) {
2953 LOG_ERROR("%s: target not halted", __func__);
2954 return ERROR_TARGET_INVALID;
2955 }
2956
2957 if (armv7a->is_armv7r)
2958 *enabled = 0;
2959 else
2960 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2961
2962 return ERROR_OK;
2963 }
2964
2965 static int cortex_a_virt2phys(struct target *target,
2966 target_addr_t virt, target_addr_t *phys)
2967 {
2968 int retval;
2969 int mmu_enabled = 0;
2970
2971 /*
2972 * If the MMU was not enabled at debug entry, there is no
2973 * way of knowing if there was ever a valid configuration
2974 * for it and thus it's not safe to enable it. In this case,
2975 * just return the virtual address as physical.
2976 */
2977 cortex_a_mmu(target, &mmu_enabled);
2978 if (!mmu_enabled) {
2979 *phys = virt;
2980 return ERROR_OK;
2981 }
2982
2983 /* mmu must be enable in order to get a correct translation */
2984 retval = cortex_a_mmu_modify(target, 1);
2985 if (retval != ERROR_OK)
2986 return retval;
2987 return armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
2988 phys, 1);
2989 }
2990
/* "cortex_a cache_info" command: print information about the current
 * target's caches via the shared ARMv7-A cache-info helper. */
COMMAND_HANDLER(cortex_a_handle_cache_info_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct armv7a_common *armv7a = target_to_armv7a(target);

	return armv7a_handle_cache_info_command(CMD,
		&armv7a->armv7a_mmu.armv7a_cache);
}
2999
3000
/* "cortex_a dbginit" / "cortex_r4 dbginit" command: (re-)initialize
 * debug access to an already-examined core. */
COMMAND_HANDLER(cortex_a_handle_dbginit_command)
{
	struct target *target = get_current_target(CMD_CTX);
	/* Debug registers are only mapped after examine. */
	if (!target_was_examined(target)) {
		LOG_ERROR("target not examined yet");
		return ERROR_FAIL;
	}

	return cortex_a_init_debug_access(target);
}
3011
3012 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3013 {
3014 struct target *target = get_current_target(CMD_CTX);
3015 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3016
3017 static const Jim_Nvp nvp_maskisr_modes[] = {
3018 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3019 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3020 { .name = NULL, .value = -1 },
3021 };
3022 const Jim_Nvp *n;
3023
3024 if (CMD_ARGC > 0) {
3025 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3026 if (n->name == NULL) {
3027 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3028 return ERROR_COMMAND_SYNTAX_ERROR;
3029 }
3030
3031 cortex_a->isrmasking_mode = n->value;
3032 }
3033
3034 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3035 command_print(CMD, "cortex_a interrupt mask %s", n->name);
3036
3037 return ERROR_OK;
3038 }
3039
3040 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3041 {
3042 struct target *target = get_current_target(CMD_CTX);
3043 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3044
3045 static const Jim_Nvp nvp_dacrfixup_modes[] = {
3046 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3047 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3048 { .name = NULL, .value = -1 },
3049 };
3050 const Jim_Nvp *n;
3051
3052 if (CMD_ARGC > 0) {
3053 n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3054 if (n->name == NULL)
3055 return ERROR_COMMAND_SYNTAX_ERROR;
3056 cortex_a->dacrfixup_mode = n->value;
3057
3058 }
3059
3060 n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3061 command_print(CMD, "cortex_a domain access control fixup %s", n->name);
3062
3063 return ERROR_OK;
3064 }
3065
/* Subcommands of the "cortex_a" command group. */
static const struct command_registration cortex_a_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask cortex_a interrupts",
		.usage = "['on'|'off']",
	},
	{
		.name = "dacrfixup",
		.handler = handle_cortex_a_dacrfixup_command,
		.mode = COMMAND_ANY,
		.help = "set domain access control (DACR) to all-manager "
			"on memory access",
		.usage = "['on'|'off']",
	},
	{
		/* Shared ARMv7-A MMU commands (e.g. dump translation tables). */
		.chain = armv7a_mmu_command_handlers,
	},
	{
		/* Generic SMP control commands. */
		.chain = smp_command_handlers,
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registrations for Cortex-A targets: generic ARM and
 * ARMv7-A command chains plus the "cortex_a" group above. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3121
/* Target-type vtable for ARMv7-A (Cortex-A) cores. */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* Virtual-address accessors; physical variants are below. */
	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.read_buffer = cortex_a_read_buffer,
	.write_buffer = cortex_a_write_buffer,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* NOTE(review): watchpoints not implemented for this target. */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,

	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
3170
/* Subcommands of the "cortex_r4" command group (no cache/MMU commands,
 * since ARMv7-R has no MMU). */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_r4 interrupts",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registrations for Cortex-R4 targets. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3202
/* Target-type vtable for ARMv7-R (Cortex-R4) cores.  Shares most
 * handlers with cortexa_target; since there is no MMU, the physical
 * accessors serve as the plain memory accessors and no mmu/virt2phys
 * hooks are installed. */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_phys_memory,
	.write_memory = cortex_a_write_phys_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* NOTE(review): watchpoints not implemented for this target. */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account and then change the URL to https://review.openocd.org/login/?link to get to this page again; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)