Cortex A/R: Allow interrupt disable during single-step
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program; if not, write to the *
38 * Free Software Foundation, Inc., *
39 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
40 * *
41 * Cortex-A8(tm) TRM, ARM DDI 0344H *
42 * Cortex-A9(tm) TRM, ARM DDI 0407F *
43 * Cortex-R4(tm) TRM, ARM DDI 0363E *
44 * Cortex-A15(tm) TRM, ARM DDI 0438C *
45 * *
46 ***************************************************************************/
47
48 #ifdef HAVE_CONFIG_H
49 #include "config.h"
50 #endif
51
52 #include "breakpoints.h"
53 #include "cortex_a.h"
54 #include "register.h"
55 #include "target_request.h"
56 #include "target_type.h"
57 #include "arm_opcodes.h"
58 #include <helper/time_support.h>
59
60 static int cortex_a_poll(struct target *target);
61 static int cortex_a_debug_entry(struct target *target);
62 static int cortex_a_restore_context(struct target *target, bool bpwp);
63 static int cortex_a_set_breakpoint(struct target *target,
64 struct breakpoint *breakpoint, uint8_t matchmode);
65 static int cortex_a_set_context_breakpoint(struct target *target,
66 struct breakpoint *breakpoint, uint8_t matchmode);
67 static int cortex_a_set_hybrid_breakpoint(struct target *target,
68 struct breakpoint *breakpoint);
69 static int cortex_a_unset_breakpoint(struct target *target,
70 struct breakpoint *breakpoint);
71 static int cortex_a_dap_read_coreregister_u32(struct target *target,
72 uint32_t *value, int regnum);
73 static int cortex_a_dap_write_coreregister_u32(struct target *target,
74 uint32_t value, int regnum);
75 static int cortex_a_mmu(struct target *target, int *enabled);
76 static int cortex_a_virt2phys(struct target *target,
77 uint32_t virt, uint32_t *phys);
78 static int cortex_a_read_apb_ab_memory(struct target *target,
79 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
80
81
82 /* restore cp15_control_reg at resume */
83 static int cortex_a_restore_cp15_control_reg(struct target *target)
84 {
85 int retval = ERROR_OK;
86 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
87 struct armv7a_common *armv7a = target_to_armv7a(target);
88
89 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
90 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
91 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
92 retval = armv7a->arm.mcr(target, 15,
93 0, 0, /* op1, op2 */
94 1, 0, /* CRn, CRm */
95 cortex_a->cp15_control_reg);
96 }
97 return retval;
98 }
99
100 /* check address before a cortex_a APB read/write access with the MMU on;
101 * avoids predictable APB data aborts */
102 static int cortex_a_check_address(struct target *target, uint32_t address)
103 {
104 struct armv7a_common *armv7a = target_to_armv7a(target);
105 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
106 uint32_t os_border = armv7a->armv7a_mmu.os_border;
107 if ((address < os_border) &&
108 (armv7a->arm.core_mode == ARM_MODE_SVC)) {
109 LOG_ERROR("%" PRIx32 " access in userspace and target in supervisor", address);
110 return ERROR_FAIL;
111 }
112 if ((address >= os_border) &&
113 (cortex_a->curr_mode != ARM_MODE_SVC)) {
114 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
115 cortex_a->curr_mode = ARM_MODE_SVC;
116 LOG_INFO("%" PRIx32 " access in kernel space and target not in supervisor",
117 address);
118 return ERROR_OK;
119 }
120 if ((address < os_border) &&
121 (cortex_a->curr_mode == ARM_MODE_SVC)) {
122 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
123 cortex_a->curr_mode = ARM_MODE_ANY;
124 }
125 return ERROR_OK;
126 }
127 /* modify cp15_control_reg in order to enable or disable the MMU for:
128 * - virt2phys address conversion
129 * - read or write memory in phys or virt address */
130 static int cortex_a_mmu_modify(struct target *target, int enable)
131 {
132 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
133 struct armv7a_common *armv7a = target_to_armv7a(target);
134 int retval = ERROR_OK;
135 if (enable) {
136 /* the MMU was disabled when the target stopped; it cannot be enabled now */
137 if (!(cortex_a->cp15_control_reg & 0x1U)) {
138 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
139 return ERROR_FAIL;
140 }
141 if (!(cortex_a->cp15_control_reg_curr & 0x1U)) {
142 cortex_a->cp15_control_reg_curr |= 0x1U;
143 retval = armv7a->arm.mcr(target, 15,
144 0, 0, /* op1, op2 */
145 1, 0, /* CRn, CRm */
146 cortex_a->cp15_control_reg_curr);
147 }
148 } else {
149 if ((cortex_a->cp15_control_reg_curr & 0x1U)) {
150 if (cortex_a->cp15_control_reg_curr & 0x4U) {
151 /* data cache is active */
152 cortex_a->cp15_control_reg_curr &= ~0x4U;
153 /* flush data cache armv7 function to be called */
154 if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache)
155 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache(target);
156 }
157 cortex_a->cp15_control_reg_curr &= ~0x1U;
158 retval = armv7a->arm.mcr(target, 15,
159 0, 0, /* op1, op2 */
160 1, 0, /* CRn, CRm */
161 cortex_a->cp15_control_reg_curr);
162 }
163 }
164 return retval;
165 }
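/* Quick reference (informative sketch): the SCTLR (cp15 c1, System Control
 * Register) bits this file manipulates, matching the masks used above and in
 * cortex_a_post_debug_entry():
 *
 *   bit 0  (0x1)     M - MMU enable
 *   bit 2  (0x4)     C - data/unified cache enable
 *   bit 12 (0x1000)  I - instruction cache enable
 */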
166
167 /*
168 * Cortex-A basic debug access; very low level, assumes state is saved
169 */
170 static int cortex_a8_init_debug_access(struct target *target)
171 {
172 struct armv7a_common *armv7a = target_to_armv7a(target);
173 struct adiv5_dap *swjdp = armv7a->arm.dap;
174 int retval;
175
176 LOG_DEBUG(" ");
177
178 /* Unlocking the debug registers for modification
179 * The debug port might be uninitialised, so try twice */
180 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
181 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
182 if (retval != ERROR_OK) {
183 /* try again */
184 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
185 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
186 if (retval == ERROR_OK)
187 LOG_USER(
188 "Locking debug access failed on first, but succeeded on second try.");
189 }
190
191 return retval;
192 }
193
194 /*
195 * Cortex-A basic debug access; very low level, assumes state is saved
196 */
197 static int cortex_a_init_debug_access(struct target *target)
198 {
199 struct armv7a_common *armv7a = target_to_armv7a(target);
200 struct adiv5_dap *swjdp = armv7a->arm.dap;
201 int retval;
202 uint32_t dbg_osreg;
203 uint32_t cortex_part_num;
204 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
205
206 LOG_DEBUG(" ");
207 cortex_part_num = (cortex_a->cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >>
208 CORTEX_A_MIDR_PARTNUM_SHIFT;
209
210 switch (cortex_part_num) {
211 case CORTEX_A7_PARTNUM:
212 case CORTEX_A15_PARTNUM:
213 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
214 armv7a->debug_base + CPUDBG_OSLSR,
215 &dbg_osreg);
216 if (retval != ERROR_OK)
217 return retval;
218
219 LOG_DEBUG("DBGOSLSR 0x%" PRIx32, dbg_osreg);
220
221 if (dbg_osreg & CPUDBG_OSLAR_LK_MASK)
222 /* Unlocking the DEBUG OS registers for modification */
223 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
224 armv7a->debug_base + CPUDBG_OSLAR,
225 0);
226 break;
227
228 case CORTEX_A5_PARTNUM:
229 case CORTEX_A8_PARTNUM:
230 case CORTEX_A9_PARTNUM:
231 default:
232 retval = cortex_a8_init_debug_access(target);
233 }
234
235 if (retval != ERROR_OK)
236 return retval;
237 /* Clear Sticky Power Down status Bit in PRSR to enable access to
238 the registers in the Core Power Domain */
239 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
240 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
241 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
242
243 if (retval != ERROR_OK)
244 return retval;
245
246 /* Enabling of instruction execution in debug mode is done in debug_entry code */
247
248 /* Resync breakpoint registers */
249
250 /* Since this is likely called from init or reset, update target state information */
251 return cortex_a_poll(target);
252 }
253
254 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
255 {
256 /* Waits until InstrCmpl_l becomes 1, indicating the instruction is done.
257 * Writes the final value of DSCR into *dscr. Pass force=true to force at
258 * least one read of DSCR. */
259 struct armv7a_common *armv7a = target_to_armv7a(target);
260 struct adiv5_dap *swjdp = armv7a->arm.dap;
261 long long then = timeval_ms();
262 while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
263 force = false;
264 int retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
265 armv7a->debug_base + CPUDBG_DSCR, dscr);
266 if (retval != ERROR_OK) {
267 LOG_ERROR("Could not read DSCR register");
268 return retval;
269 }
270 if (timeval_ms() > then + 1000) {
271 LOG_ERROR("Timeout waiting for InstrCompl=1");
272 return ERROR_FAIL;
273 }
274 }
275 return ERROR_OK;
276 }
277
278 /* To reduce needless round-trips, pass in a pointer to the current
279 * DSCR value. Initialize it to zero if you just need to know the
280 * value on return from this function; or DSCR_INSTR_COMP if you
281 * happen to know that no instruction is pending.
282 */
283 static int cortex_a_exec_opcode(struct target *target,
284 uint32_t opcode, uint32_t *dscr_p)
285 {
286 uint32_t dscr;
287 int retval;
288 struct armv7a_common *armv7a = target_to_armv7a(target);
289 struct adiv5_dap *swjdp = armv7a->arm.dap;
290
291 dscr = dscr_p ? *dscr_p : 0;
292
293 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
294
295 /* Wait for InstrCompl bit to be set */
296 retval = cortex_a_wait_instrcmpl(target, &dscr, false);
297 if (retval != ERROR_OK)
298 return retval;
299
300 retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
301 armv7a->debug_base + CPUDBG_ITR, opcode);
302 if (retval != ERROR_OK)
303 return retval;
304
305 long long then = timeval_ms();
306 do {
307 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
308 armv7a->debug_base + CPUDBG_DSCR, &dscr);
309 if (retval != ERROR_OK) {
310 LOG_ERROR("Could not read DSCR register");
311 return retval;
312 }
313 if (timeval_ms() > then + 1000) {
314 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
315 return ERROR_FAIL;
316 }
317 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
318
319 if (dscr_p)
320 *dscr_p = dscr;
321
322 return retval;
323 }
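/* Usage sketch (illustrative only, not part of the driver): read CPSR into
 * the DCC by chaining two opcodes, reusing the cached DSCR value so only one
 * extra DSCR read is needed:
 *
 *   uint32_t dscr = DSCR_INSTR_COMP;   (assumes no instruction is pending)
 *   retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, 0), &dscr);
 *   if (retval == ERROR_OK)
 *       retval = cortex_a_exec_opcode(target,
 *               ARMV4_5_MCR(14, 0, 0, 0, 5, 0), &dscr);
 */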
324
325 /**************************************************************************
326 Read core registers with very few exec_opcode calls; fast, but needs a work_area.
327 This can cause problems with the MMU active.
328 **************************************************************************/
329 static int cortex_a_read_regs_through_mem(struct target *target, uint32_t address,
330 uint32_t *regfile)
331 {
332 int retval = ERROR_OK;
333 struct armv7a_common *armv7a = target_to_armv7a(target);
334 struct adiv5_dap *swjdp = armv7a->arm.dap;
335
336 retval = cortex_a_dap_read_coreregister_u32(target, regfile, 0);
337 if (retval != ERROR_OK)
338 return retval;
339 retval = cortex_a_dap_write_coreregister_u32(target, address, 0);
340 if (retval != ERROR_OK)
341 return retval;
342 retval = cortex_a_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
343 if (retval != ERROR_OK)
344 return retval;
345
346 retval = mem_ap_sel_read_buf(swjdp, armv7a->memory_ap,
347 (uint8_t *)(&regfile[1]), 4, 15, address);
348
349 return retval;
350 }
351
352 static int cortex_a_dap_read_coreregister_u32(struct target *target,
353 uint32_t *value, int regnum)
354 {
355 int retval = ERROR_OK;
356 uint8_t reg = regnum&0xFF;
357 uint32_t dscr = 0;
358 struct armv7a_common *armv7a = target_to_armv7a(target);
359 struct adiv5_dap *swjdp = armv7a->arm.dap;
360
361 if (reg > 17)
362 return retval;
363
364 if (reg < 15) {
365 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
366 retval = cortex_a_exec_opcode(target,
367 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
368 &dscr);
369 if (retval != ERROR_OK)
370 return retval;
371 } else if (reg == 15) {
372 /* "MOV r0, r15"; then move r0 to DCCTX */
373 retval = cortex_a_exec_opcode(target, 0xE1A0000F, &dscr);
374 if (retval != ERROR_OK)
375 return retval;
376 retval = cortex_a_exec_opcode(target,
377 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
378 &dscr);
379 if (retval != ERROR_OK)
380 return retval;
381 } else {
382 /* "MRS r0, CPSR" or "MRS r0, SPSR"
383 * then move r0 to DCCTX
384 */
385 retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
386 if (retval != ERROR_OK)
387 return retval;
388 retval = cortex_a_exec_opcode(target,
389 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
390 &dscr);
391 if (retval != ERROR_OK)
392 return retval;
393 }
394
395 /* Wait for DTRTXfull, then read DTRTX */
396 long long then = timeval_ms();
397 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
398 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
399 armv7a->debug_base + CPUDBG_DSCR, &dscr);
400 if (retval != ERROR_OK)
401 return retval;
402 if (timeval_ms() > then + 1000) {
403 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
404 return ERROR_FAIL;
405 }
406 }
407
408 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
409 armv7a->debug_base + CPUDBG_DTRTX, value);
410 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
411
412 return retval;
413 }
414
415 static int cortex_a_dap_write_coreregister_u32(struct target *target,
416 uint32_t value, int regnum)
417 {
418 int retval = ERROR_OK;
419 uint8_t Rd = regnum&0xFF;
420 uint32_t dscr;
421 struct armv7a_common *armv7a = target_to_armv7a(target);
422 struct adiv5_dap *swjdp = armv7a->arm.dap;
423
424 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
425
426 /* Check that DCCRX is not full */
427 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
428 armv7a->debug_base + CPUDBG_DSCR, &dscr);
429 if (retval != ERROR_OK)
430 return retval;
431 if (dscr & DSCR_DTR_RX_FULL) {
432 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
433 /* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
434 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
435 &dscr);
436 if (retval != ERROR_OK)
437 return retval;
438 }
439
440 if (Rd > 17)
441 return retval;
442
443 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
444 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
445 retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
446 armv7a->debug_base + CPUDBG_DTRRX, value);
447 if (retval != ERROR_OK)
448 return retval;
449
450 if (Rd < 15) {
451 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
452 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
453 &dscr);
454
455 if (retval != ERROR_OK)
456 return retval;
457 } else if (Rd == 15) {
458 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
459 * then "mov r15, r0"
460 */
461 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
462 &dscr);
463 if (retval != ERROR_OK)
464 return retval;
465 retval = cortex_a_exec_opcode(target, 0xE1A0F000, &dscr);
466 if (retval != ERROR_OK)
467 return retval;
468 } else {
469 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
470 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
471 */
472 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
473 &dscr);
474 if (retval != ERROR_OK)
475 return retval;
476 retval = cortex_a_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
477 &dscr);
478 if (retval != ERROR_OK)
479 return retval;
480
481 /* "Prefetch flush" after modifying execution status in CPSR */
482 if (Rd == 16) {
483 retval = cortex_a_exec_opcode(target,
484 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
485 &dscr);
486 if (retval != ERROR_OK)
487 return retval;
488 }
489 }
490
491 return retval;
492 }
493
494 /* Write to memory mapped registers directly with no cache or mmu handling */
495 static int cortex_a_dap_write_memap_register_u32(struct target *target,
496 uint32_t address,
497 uint32_t value)
498 {
499 int retval;
500 struct armv7a_common *armv7a = target_to_armv7a(target);
501 struct adiv5_dap *swjdp = armv7a->arm.dap;
502
503 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, address, value);
504
505 return retval;
506 }
507
508 /*
509 * Cortex-A implementation of Debug Programmer's Model
510 *
511 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
512 * so there's no need to poll for it before executing an instruction.
513 *
514 * NOTE that in several of these cases the "stall" mode might be useful.
515 * It'd let us queue a few operations together... prepare/finish might
516 * be the places to enable/disable that mode.
517 */
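/* Typical flow (sketch): reading a CP15 register through the DPM is
 * prepare -> instr_read_data_r0 -> finish, e.g. for the SCTLR:
 *
 *   dpm->prepare(dpm);
 *   dpm->instr_read_data_r0(dpm, ARMV4_5_MRC(15, 0, 0, 1, 0, 0), &value);
 *   dpm->finish(dpm);
 *
 * which executes "MRC p15, 0, r0, c1, c0, 0" on the core and then pulls r0
 * back through the DCC (see cortex_a_instr_read_data_r0 below).
 */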
518
519 static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
520 {
521 return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
522 }
523
524 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
525 {
526 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
527 return mem_ap_sel_write_u32(a->armv7a_common.arm.dap,
528 a->armv7a_common.debug_ap, a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
529 }
530
531 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
532 uint32_t *dscr_p)
533 {
534 struct adiv5_dap *swjdp = a->armv7a_common.arm.dap;
535 uint32_t dscr = DSCR_INSTR_COMP;
536 int retval;
537
538 if (dscr_p)
539 dscr = *dscr_p;
540
541 /* Wait for DTRTXfull */
542 long long then = timeval_ms();
543 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
544 retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
545 a->armv7a_common.debug_base + CPUDBG_DSCR,
546 &dscr);
547 if (retval != ERROR_OK)
548 return retval;
549 if (timeval_ms() > then + 1000) {
550 LOG_ERROR("Timeout waiting for read dcc");
551 return ERROR_FAIL;
552 }
553 }
554
555 retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
556 a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
557 if (retval != ERROR_OK)
558 return retval;
559 /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
560
561 if (dscr_p)
562 *dscr_p = dscr;
563
564 return retval;
565 }
566
567 static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
568 {
569 struct cortex_a_common *a = dpm_to_a(dpm);
570 struct adiv5_dap *swjdp = a->armv7a_common.arm.dap;
571 uint32_t dscr;
572 int retval;
573
574 /* set up invariant: INSTR_COMP is set after every DPM operation */
575 long long then = timeval_ms();
576 for (;; ) {
577 retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
578 a->armv7a_common.debug_base + CPUDBG_DSCR,
579 &dscr);
580 if (retval != ERROR_OK)
581 return retval;
582 if ((dscr & DSCR_INSTR_COMP) != 0)
583 break;
584 if (timeval_ms() > then + 1000) {
585 LOG_ERROR("Timeout waiting for dpm prepare");
586 return ERROR_FAIL;
587 }
588 }
589
590 /* this "should never happen" ... */
591 if (dscr & DSCR_DTR_RX_FULL) {
592 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
593 /* Clear DCCRX */
594 retval = cortex_a_exec_opcode(
595 a->armv7a_common.arm.target,
596 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
597 &dscr);
598 if (retval != ERROR_OK)
599 return retval;
600 }
601
602 return retval;
603 }
604
605 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
606 {
607 /* REVISIT what could be done here? */
608 return ERROR_OK;
609 }
610
611 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
612 uint32_t opcode, uint32_t data)
613 {
614 struct cortex_a_common *a = dpm_to_a(dpm);
615 int retval;
616 uint32_t dscr = DSCR_INSTR_COMP;
617
618 retval = cortex_a_write_dcc(a, data);
619 if (retval != ERROR_OK)
620 return retval;
621
622 return cortex_a_exec_opcode(
623 a->armv7a_common.arm.target,
624 opcode,
625 &dscr);
626 }
627
628 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
629 uint32_t opcode, uint32_t data)
630 {
631 struct cortex_a_common *a = dpm_to_a(dpm);
632 uint32_t dscr = DSCR_INSTR_COMP;
633 int retval;
634
635 retval = cortex_a_write_dcc(a, data);
636 if (retval != ERROR_OK)
637 return retval;
638
639 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
640 retval = cortex_a_exec_opcode(
641 a->armv7a_common.arm.target,
642 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
643 &dscr);
644 if (retval != ERROR_OK)
645 return retval;
646
647 /* then the opcode, taking data from R0 */
648 retval = cortex_a_exec_opcode(
649 a->armv7a_common.arm.target,
650 opcode,
651 &dscr);
652
653 return retval;
654 }
655
656 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
657 {
658 struct target *target = dpm->arm->target;
659 uint32_t dscr = DSCR_INSTR_COMP;
660
661 /* "Prefetch flush" after modifying execution status in CPSR */
662 return cortex_a_exec_opcode(target,
663 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
664 &dscr);
665 }
666
667 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
668 uint32_t opcode, uint32_t *data)
669 {
670 struct cortex_a_common *a = dpm_to_a(dpm);
671 int retval;
672 uint32_t dscr = DSCR_INSTR_COMP;
673
674 /* the opcode, writing data to DCC */
675 retval = cortex_a_exec_opcode(
676 a->armv7a_common.arm.target,
677 opcode,
678 &dscr);
679 if (retval != ERROR_OK)
680 return retval;
681
682 return cortex_a_read_dcc(a, data, &dscr);
683 }
684
685
686 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
687 uint32_t opcode, uint32_t *data)
688 {
689 struct cortex_a_common *a = dpm_to_a(dpm);
690 uint32_t dscr = DSCR_INSTR_COMP;
691 int retval;
692
693 /* the opcode, writing data to R0 */
694 retval = cortex_a_exec_opcode(
695 a->armv7a_common.arm.target,
696 opcode,
697 &dscr);
698 if (retval != ERROR_OK)
699 return retval;
700
701 /* write R0 to DCC */
702 retval = cortex_a_exec_opcode(
703 a->armv7a_common.arm.target,
704 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
705 &dscr);
706 if (retval != ERROR_OK)
707 return retval;
708
709 return cortex_a_read_dcc(a, data, &dscr);
710 }
711
712 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
713 uint32_t addr, uint32_t control)
714 {
715 struct cortex_a_common *a = dpm_to_a(dpm);
716 uint32_t vr = a->armv7a_common.debug_base;
717 uint32_t cr = a->armv7a_common.debug_base;
718 int retval;
719
720 switch (index_t) {
721 case 0 ... 15: /* breakpoints */
722 vr += CPUDBG_BVR_BASE;
723 cr += CPUDBG_BCR_BASE;
724 break;
725 case 16 ... 31: /* watchpoints */
726 vr += CPUDBG_WVR_BASE;
727 cr += CPUDBG_WCR_BASE;
728 index_t -= 16;
729 break;
730 default:
731 return ERROR_FAIL;
732 }
733 vr += 4 * index_t;
734 cr += 4 * index_t;
735
736 LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
737 (unsigned) vr, (unsigned) cr);
738
739 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
740 vr, addr);
741 if (retval != ERROR_OK)
742 return retval;
743 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
744 cr, control);
745 return retval;
746 }
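/* Example (follows directly from the code above): index_t 17 selects
 * watchpoint pair 1, i.e. WVR1/WCR1 at debug_base + CPUDBG_WVR_BASE + 4
 * and debug_base + CPUDBG_WCR_BASE + 4. */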
747
748 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
749 {
750 struct cortex_a_common *a = dpm_to_a(dpm);
751 uint32_t cr;
752
753 switch (index_t) {
754 case 0 ... 15:
755 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
756 break;
757 case 16 ... 31:
758 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
759 index_t -= 16;
760 break;
761 default:
762 return ERROR_FAIL;
763 }
764 cr += 4 * index_t;
765
766 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
767
768 /* clear control register */
769 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
770 }
771
772 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
773 {
774 struct arm_dpm *dpm = &a->armv7a_common.dpm;
775 int retval;
776
777 dpm->arm = &a->armv7a_common.arm;
778 dpm->didr = didr;
779
780 dpm->prepare = cortex_a_dpm_prepare;
781 dpm->finish = cortex_a_dpm_finish;
782
783 dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
784 dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
785 dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
786
787 dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
788 dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
789
790 dpm->bpwp_enable = cortex_a_bpwp_enable;
791 dpm->bpwp_disable = cortex_a_bpwp_disable;
792
793 retval = arm_dpm_setup(dpm);
794 if (retval == ERROR_OK)
795 retval = arm_dpm_initialize(dpm);
796
797 return retval;
798 }
799 static struct target *get_cortex_a(struct target *target, int32_t coreid)
800 {
801 struct target_list *head;
802 struct target *curr;
803
804 head = target->head;
805 while (head != (struct target_list *)NULL) {
806 curr = head->target;
807 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
808 return curr;
809 head = head->next;
810 }
811 return target;
812 }
813 static int cortex_a_halt(struct target *target);
814
815 static int cortex_a_halt_smp(struct target *target)
816 {
817 int retval = 0;
818 struct target_list *head;
819 struct target *curr;
820 head = target->head;
821 while (head != (struct target_list *)NULL) {
822 curr = head->target;
823 if ((curr != target) && (curr->state != TARGET_HALTED))
824 retval += cortex_a_halt(curr);
825 head = head->next;
826 }
827 return retval;
828 }
829
830 static int update_halt_gdb(struct target *target)
831 {
832 int retval = 0;
833 if (target->gdb_service && target->gdb_service->core[0] == -1) {
834 target->gdb_service->target = target;
835 target->gdb_service->core[0] = target->coreid;
836 retval += cortex_a_halt_smp(target);
837 }
838 return retval;
839 }
840
841 /*
842 * Cortex-A Run control
843 */
844
845 static int cortex_a_poll(struct target *target)
846 {
847 int retval = ERROR_OK;
848 uint32_t dscr;
849 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
850 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
851 struct adiv5_dap *swjdp = armv7a->arm.dap;
852 enum target_state prev_target_state = target->state;
853 /* a toggle to another core is done by gdb as follows: */
854 /* maint packet J core_id */
855 /* continue */
856 /* the next poll triggers a halt event sent to gdb */
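/* e.g. from the gdb prompt (sketch of the sequence described above):
 *   maint packet J core_id
 *   continue
 * after which this poll reports a halt event on the selected core */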
857 if ((target->state == TARGET_HALTED) && (target->smp) &&
858 (target->gdb_service) &&
859 (target->gdb_service->target == NULL)) {
860 target->gdb_service->target =
861 get_cortex_a(target, target->gdb_service->core[1]);
862 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
863 return retval;
864 }
865 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
866 armv7a->debug_base + CPUDBG_DSCR, &dscr);
867 if (retval != ERROR_OK)
868 return retval;
869 cortex_a->cpudbg_dscr = dscr;
870
871 if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
872 if (prev_target_state != TARGET_HALTED) {
873 /* We have a halting debug event */
874 LOG_DEBUG("Target halted");
875 target->state = TARGET_HALTED;
876 if ((prev_target_state == TARGET_RUNNING)
877 || (prev_target_state == TARGET_UNKNOWN)
878 || (prev_target_state == TARGET_RESET)) {
879 retval = cortex_a_debug_entry(target);
880 if (retval != ERROR_OK)
881 return retval;
882 if (target->smp) {
883 retval = update_halt_gdb(target);
884 if (retval != ERROR_OK)
885 return retval;
886 }
887 target_call_event_callbacks(target,
888 TARGET_EVENT_HALTED);
889 }
890 if (prev_target_state == TARGET_DEBUG_RUNNING) {
891 LOG_DEBUG(" ");
892
893 retval = cortex_a_debug_entry(target);
894 if (retval != ERROR_OK)
895 return retval;
896 if (target->smp) {
897 retval = update_halt_gdb(target);
898 if (retval != ERROR_OK)
899 return retval;
900 }
901
902 target_call_event_callbacks(target,
903 TARGET_EVENT_DEBUG_HALTED);
904 }
905 }
906 } else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
907 target->state = TARGET_RUNNING;
908 else {
909 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
910 target->state = TARGET_UNKNOWN;
911 }
912
913 return retval;
914 }
915
916 static int cortex_a_halt(struct target *target)
917 {
918 int retval = ERROR_OK;
919 uint32_t dscr;
920 struct armv7a_common *armv7a = target_to_armv7a(target);
921 struct adiv5_dap *swjdp = armv7a->arm.dap;
922
923 /*
924 * Tell the core to be halted by writing DRCR with 0x1
925 * and then wait for the core to be halted.
926 */
927 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
928 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
929 if (retval != ERROR_OK)
930 return retval;
931
932 /*
933 * enter halting debug mode
934 */
935 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
936 armv7a->debug_base + CPUDBG_DSCR, &dscr);
937 if (retval != ERROR_OK)
938 return retval;
939
940 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
941 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
942 if (retval != ERROR_OK)
943 return retval;
944
945 long long then = timeval_ms();
946 for (;; ) {
947 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
948 armv7a->debug_base + CPUDBG_DSCR, &dscr);
949 if (retval != ERROR_OK)
950 return retval;
951 if ((dscr & DSCR_CORE_HALTED) != 0)
952 break;
953 if (timeval_ms() > then + 1000) {
954 LOG_ERROR("Timeout waiting for halt");
955 return ERROR_FAIL;
956 }
957 }
958
959 target->debug_reason = DBG_REASON_DBGRQ;
960
961 return ERROR_OK;
962 }
963
964 static int cortex_a_internal_restore(struct target *target, int current,
965 uint32_t *address, int handle_breakpoints, int debug_execution)
966 {
967 struct armv7a_common *armv7a = target_to_armv7a(target);
968 struct arm *arm = &armv7a->arm;
969 int retval;
970 uint32_t resume_pc;
971
972 if (!debug_execution)
973 target_free_all_working_areas(target);
974
975 #if 0
976 if (debug_execution) {
977 /* Disable interrupts */
978 /* We disable interrupts in the PRIMASK register instead of
979 * masking with C_MASKINTS,
980 * This is probably the same issue as Cortex-M3 Errata 377493:
981 * C_MASKINTS in parallel with disabled interrupts can cause
982 * local faults to not be taken. */
983 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
984 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
985 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
986
987 /* Make sure we are in Thumb mode */
988 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
989 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
990 32) | (1 << 24));
991 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
992 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
993 }
994 #endif
995
996 /* current = 1: continue on current pc, otherwise continue at <address> */
997 resume_pc = buf_get_u32(arm->pc->value, 0, 32);
998 if (!current)
999 resume_pc = *address;
1000 else
1001 *address = resume_pc;
1002
1003 /* Make sure that the Armv7 gdb thumb fixup does not
1004 * kill the return address
1005 */
1006 switch (arm->core_state) {
1007 case ARM_STATE_ARM:
1008 resume_pc &= 0xFFFFFFFC;
1009 break;
1010 case ARM_STATE_THUMB:
1011 case ARM_STATE_THUMB_EE:
1012 /* When the return address is loaded into PC
1013 * bit 0 must be 1 to stay in Thumb state
1014 */
1015 resume_pc |= 0x1;
1016 break;
1017 case ARM_STATE_JAZELLE:
1018 LOG_ERROR("How do I resume into Jazelle state??");
1019 return ERROR_FAIL;
1020 }
1021 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
1022 buf_set_u32(arm->pc->value, 0, 32, resume_pc);
1023 arm->pc->dirty = 1;
1024 arm->pc->valid = 1;
1025 /* restore dpm_mode at system halt */
1026 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1027 /* call it now, before restoring context, because it uses cpu
1028 * register r0 for restoring the cp15 control register */
1029 retval = cortex_a_restore_cp15_control_reg(target);
1030 if (retval != ERROR_OK)
1031 return retval;
1032 retval = cortex_a_restore_context(target, handle_breakpoints);
1033 if (retval != ERROR_OK)
1034 return retval;
1035 target->debug_reason = DBG_REASON_NOTHALTED;
1036 target->state = TARGET_RUNNING;
1037
1038 /* registers are now invalid */
1039 register_cache_invalidate(arm->core_cache);
1040
1041 #if 0
1042 /* the front-end may request us not to handle breakpoints */
1043 if (handle_breakpoints) {
1044 /* Single step past breakpoint at current address */
1045 breakpoint = breakpoint_find(target, resume_pc);
1046 if (breakpoint) {
1047 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
1048 cortex_m3_unset_breakpoint(target, breakpoint);
1049 cortex_m3_single_step_core(target);
1050 cortex_m3_set_breakpoint(target, breakpoint);
1051 }
1052 }
1053
1054 #endif
1055 return retval;
1056 }
1057
1058 static int cortex_a_internal_restart(struct target *target)
1059 {
1060 struct armv7a_common *armv7a = target_to_armv7a(target);
1061 struct arm *arm = &armv7a->arm;
1062 struct adiv5_dap *swjdp = arm->dap;
1063 int retval;
1064 uint32_t dscr;
1065 /*
1066 * Restart core and wait for it to be started. Clear ITRen and sticky
1067 * exception flags: see ARMv7 ARM, C5.9.
1068 *
1069 * REVISIT: for single stepping, we probably want to
1070 * disable IRQs by default, with optional override...
1071 */
1072
1073 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1074 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1075 if (retval != ERROR_OK)
1076 return retval;
1077
1078 if ((dscr & DSCR_INSTR_COMP) == 0)
1079 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
1080
1081 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1082 armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
1083 if (retval != ERROR_OK)
1084 return retval;
1085
1086 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1087 armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
1088 DRCR_CLEAR_EXCEPTIONS);
1089 if (retval != ERROR_OK)
1090 return retval;
1091
1092 long long then = timeval_ms();
1093 for (;; ) {
1094 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1095 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1096 if (retval != ERROR_OK)
1097 return retval;
1098 if ((dscr & DSCR_CORE_RESTARTED) != 0)
1099 break;
1100 if (timeval_ms() > then + 1000) {
1101 LOG_ERROR("Timeout waiting for resume");
1102 return ERROR_FAIL;
1103 }
1104 }
1105
1106 target->debug_reason = DBG_REASON_NOTHALTED;
1107 target->state = TARGET_RUNNING;
1108
1109 /* registers are now invalid */
1110 register_cache_invalidate(arm->core_cache);
1111
1112 return ERROR_OK;
1113 }
1114
1115 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
1116 {
1117 int retval = 0;
1118 struct target_list *head;
1119 struct target *curr;
1120 uint32_t address;
1121 head = target->head;
1122 while (head != (struct target_list *)NULL) {
1123 curr = head->target;
1124 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1125 /* resume at current address, not in step mode */
1126 retval += cortex_a_internal_restore(curr, 1, &address,
1127 handle_breakpoints, 0);
1128 retval += cortex_a_internal_restart(curr);
1129 }
1130 head = head->next;
1131
1132 }
1133 return retval;
1134 }
1135
1136 static int cortex_a_resume(struct target *target, int current,
1137 uint32_t address, int handle_breakpoints, int debug_execution)
1138 {
1139 int retval = 0;
1140 /* dummy resume for smp toggle in order to reduce gdb impact */
1141 if (target->smp && target->gdb_service && (target->gdb_service->core[1] != -1)) {
1142 /* simulate a start and halt of target */
1143 target->gdb_service->target = NULL;
1144 target->gdb_service->core[0] = target->gdb_service->core[1];
1145 /* fake resume: at the next poll we play target core[1], see poll */
1146 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1147 return 0;
1148 }
1149 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1150 if (target->smp) {
1151 target->gdb_service->core[0] = -1;
1152 retval = cortex_a_restore_smp(target, handle_breakpoints);
1153 if (retval != ERROR_OK)
1154 return retval;
1155 }
1156 cortex_a_internal_restart(target);
1157
1158 if (!debug_execution) {
1159 target->state = TARGET_RUNNING;
1160 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1161 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1162 } else {
1163 target->state = TARGET_DEBUG_RUNNING;
1164 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1165 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1166 }
1167
1168 return ERROR_OK;
1169 }
1170
1171 static int cortex_a_debug_entry(struct target *target)
1172 {
1173 int i;
1174 uint32_t regfile[16], cpsr, dscr;
1175 int retval = ERROR_OK;
1176 struct working_area *regfile_working_area = NULL;
1177 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1178 struct armv7a_common *armv7a = target_to_armv7a(target);
1179 struct arm *arm = &armv7a->arm;
1180 struct adiv5_dap *swjdp = armv7a->arm.dap;
1181 struct reg *reg;
1182
1183 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);
1184
1185 /* REVISIT surely we should not re-read DSCR !! */
1186 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1187 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1188 if (retval != ERROR_OK)
1189 return retval;
1190
1191 /* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
1192 * imprecise data aborts get discarded by issuing a Data
1193 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1194 */
1195
1196 /* Enable the ITR execution once we are in debug mode */
1197 dscr |= DSCR_ITR_EN;
1198 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1199 armv7a->debug_base + CPUDBG_DSCR, dscr);
1200 if (retval != ERROR_OK)
1201 return retval;
1202
1203 /* Examine debug reason */
1204 arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);
1205
1206 /* save address of instruction that triggered the watchpoint? */
1207 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1208 uint32_t wfar;
1209
1210 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1211 armv7a->debug_base + CPUDBG_WFAR,
1212 &wfar);
1213 if (retval != ERROR_OK)
1214 return retval;
1215 arm_dpm_report_wfar(&armv7a->dpm, wfar);
1216 }
1217
1218 /* REVISIT fast_reg_read is never set ... */
1219
1220 /* Examine target state and mode */
1221 if (cortex_a->fast_reg_read)
1222 target_alloc_working_area(target, 64, &regfile_working_area);
1223
1224 /* First load registers accessible through the core debug port */
1225 if (!regfile_working_area)
1226 retval = arm_dpm_read_current_registers(&armv7a->dpm);
1227 else {
1228 retval = cortex_a_read_regs_through_mem(target,
1229 regfile_working_area->address, regfile);
1230
1231 target_free_working_area(target, regfile_working_area);
1232 if (retval != ERROR_OK)
1233 return retval;
1234
1235 /* read Current PSR */
1236 retval = cortex_a_dap_read_coreregister_u32(target, &cpsr, 16);
1237 /* store current cpsr */
1238 if (retval != ERROR_OK)
1239 return retval;
1240
1241 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
1242
1243 arm_set_cpsr(arm, cpsr);
1244
1245 /* update cache */
1246 for (i = 0; i <= ARM_PC; i++) {
1247 reg = arm_reg_current(arm, i);
1248
1249 buf_set_u32(reg->value, 0, 32, regfile[i]);
1250 reg->valid = 1;
1251 reg->dirty = 0;
1252 }
1253
1254 /* Fixup PC Resume Address */
1255 if (cpsr & (1 << 5)) {
1256 /* T bit set for Thumb or ThumbEE state */
1257 regfile[ARM_PC] -= 4;
1258 } else {
1259 /* ARM state */
1260 regfile[ARM_PC] -= 8;
1261 }
1262
1263 reg = arm->pc;
1264 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
1265 reg->dirty = reg->valid;
1266 }
1267
1268 #if 0
1269 /* TODO, Move this */
1270 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1271 cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1272 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1273
1274 cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1275 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1276
1277 cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1278 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1279 #endif
1280
1281 /* Are we in an exception handler */
1282 /* armv4_5->exception_number = 0; */
1283 if (armv7a->post_debug_entry) {
1284 retval = armv7a->post_debug_entry(target);
1285 if (retval != ERROR_OK)
1286 return retval;
1287 }
1288
1289 return retval;
1290 }
1291
1292 static int cortex_a_post_debug_entry(struct target *target)
1293 {
1294 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1295 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1296 int retval;
1297
1298 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1299 retval = armv7a->arm.mrc(target, 15,
1300 0, 0, /* op1, op2 */
1301 1, 0, /* CRn, CRm */
1302 &cortex_a->cp15_control_reg);
1303 if (retval != ERROR_OK)
1304 return retval;
1305 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1306 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1307
1308 if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1)
1309 armv7a_identify_cache(target);
1310
1311 if (armv7a->is_armv7r) {
1312 armv7a->armv7a_mmu.mmu_enabled = 0;
1313 } else {
1314 armv7a->armv7a_mmu.mmu_enabled =
1315 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1316 }
1317 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1318 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1319 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1320 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1321 cortex_a->curr_mode = armv7a->arm.core_mode;
1322
1323 return ERROR_OK;
1324 }
1325
1326 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1327 {
1328 struct armv7a_common *armv7a = target_to_armv7a(target);
1329 struct adiv5_dap *swjdp = armv7a->arm.dap;
1330 uint32_t dscr;
1331
1332 /* Read DSCR */
1333 int retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1334 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1335 if (ERROR_OK != retval)
1336 return retval;
1337
1338 /* clear bitfield */
1339 dscr &= ~bit_mask;
1340 /* put new value */
1341 dscr |= value & bit_mask;
1342
1343 /* write new DSCR */
1344 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1345 armv7a->debug_base + CPUDBG_DSCR, dscr);
1346 return retval;
1347 }
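/* Usage sketch: this read-modify-write helper is how cortex_a_step() below
 * masks interrupts around a single step when isrmasking_mode is
 * CORTEX_A_ISRMASK_ON:
 *
 *   retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
 *   ...single step...
 *   retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
 */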
1348
1349 static int cortex_a_step(struct target *target, int current, uint32_t address,
1350 int handle_breakpoints)
1351 {
1352 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1353 struct armv7a_common *armv7a = target_to_armv7a(target);
1354 struct arm *arm = &armv7a->arm;
1355 struct breakpoint *breakpoint = NULL;
1356 struct breakpoint stepbreakpoint;
1357 struct reg *r;
1358 int retval;
1359
1360 if (target->state != TARGET_HALTED) {
1361 LOG_WARNING("target not halted");
1362 return ERROR_TARGET_NOT_HALTED;
1363 }
1364
1365 /* current = 1: continue on current pc, otherwise continue at <address> */
1366 r = arm->pc;
1367 if (!current)
1368 buf_set_u32(r->value, 0, 32, address);
1369 else
1370 address = buf_get_u32(r->value, 0, 32);
1371
1372 /* The front-end may request us not to handle breakpoints.
1373 * But since Cortex-A uses a hardware breakpoint for single stepping,
1374 * we MUST handle breakpoints.
1375 */
1376 handle_breakpoints = 1;
1377 if (handle_breakpoints) {
1378 breakpoint = breakpoint_find(target, address);
1379 if (breakpoint)
1380 cortex_a_unset_breakpoint(target, breakpoint);
1381 }
1382
1383 /* Setup single step breakpoint */
1384 stepbreakpoint.address = address;
1385 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1386 ? 2 : 4;
1387 stepbreakpoint.type = BKPT_HARD;
1388 stepbreakpoint.set = 0;
1389
1390 /* Disable interrupts during single step if requested */
1391 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1392 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1393 if (ERROR_OK != retval)
1394 return retval;
1395 }
1396
1397 /* Break on IVA mismatch */
1398 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1399
1400 target->debug_reason = DBG_REASON_SINGLESTEP;
1401
1402 retval = cortex_a_resume(target, 1, address, 0, 0);
1403 if (retval != ERROR_OK)
1404 return retval;
1405
1406 long long then = timeval_ms();
1407 while (target->state != TARGET_HALTED) {
1408 retval = cortex_a_poll(target);
1409 if (retval != ERROR_OK)
1410 return retval;
1411 if (timeval_ms() > then + 1000) {
1412 LOG_ERROR("timeout waiting for target halt");
1413 return ERROR_FAIL;
1414 }
1415 }
1416
1417 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1418
1419 /* Re-enable interrupts if they were disabled */
1420 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1421 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1422 if (ERROR_OK != retval)
1423 return retval;
1424 }
1425
1426
1427 target->debug_reason = DBG_REASON_BREAKPOINT;
1428
1429 if (breakpoint)
1430 cortex_a_set_breakpoint(target, breakpoint, 0);
1431
1432 if (target->state == TARGET_HALTED)
1433 LOG_DEBUG("target stepped");
1434
1435 return ERROR_OK;
1436 }
1437
1438 static int cortex_a_restore_context(struct target *target, bool bpwp)
1439 {
1440 struct armv7a_common *armv7a = target_to_armv7a(target);
1441
1442 LOG_DEBUG(" ");
1443
1444 if (armv7a->pre_restore_context)
1445 armv7a->pre_restore_context(target);
1446
1447 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1448 }
1449
1450 /*
1451 * Cortex-A Breakpoint and watchpoint functions
1452 */
1453
1454 /* Setup hardware Breakpoint Register Pair */
1455 static int cortex_a_set_breakpoint(struct target *target,
1456 struct breakpoint *breakpoint, uint8_t matchmode)
1457 {
1458 int retval;
1459 int brp_i = 0;
1460 uint32_t control;
1461 uint8_t byte_addr_select = 0x0F;
1462 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1463 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1464 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1465
1466 if (breakpoint->set) {
1467 LOG_WARNING("breakpoint already set");
1468 return ERROR_OK;
1469 }
1470
1471 if (breakpoint->type == BKPT_HARD) {
1472 while ((brp_i < cortex_a->brp_num) && brp_list[brp_i].used)
1473 brp_i++;
1474 if (brp_i >= cortex_a->brp_num) {
1475 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1476 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1477 }
1478 breakpoint->set = brp_i + 1;
1479 if (breakpoint->length == 2)
1480 byte_addr_select = (3 << (breakpoint->address & 0x02));
1481 control = ((matchmode & 0x7) << 20)
1482 | (byte_addr_select << 5)
1483 | (3 << 1) | 1;
1484 brp_list[brp_i].used = 1;
1485 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1486 brp_list[brp_i].control = control;
1487 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1488 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1489 brp_list[brp_i].value);
1490 if (retval != ERROR_OK)
1491 return retval;
1492 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1493 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1494 brp_list[brp_i].control);
1495 if (retval != ERROR_OK)
1496 return retval;
1497 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1498 brp_list[brp_i].control,
1499 brp_list[brp_i].value);
1500 } else if (breakpoint->type == BKPT_SOFT) {
1501 uint8_t code[4];
1502 if (breakpoint->length == 2)
1503 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1504 else
1505 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1506 retval = target_read_memory(target,
1507 breakpoint->address & 0xFFFFFFFE,
1508 breakpoint->length, 1,
1509 breakpoint->orig_instr);
1510 if (retval != ERROR_OK)
1511 return retval;
1512 retval = target_write_memory(target,
1513 breakpoint->address & 0xFFFFFFFE,
1514 breakpoint->length, 1, code);
1515 if (retval != ERROR_OK)
1516 return retval;
1517 breakpoint->set = 0x11; /* Any nice value but 0 */
1518 }
1519
1520 return ERROR_OK;
1521 }
1522
1523 static int cortex_a_set_context_breakpoint(struct target *target,
1524 struct breakpoint *breakpoint, uint8_t matchmode)
1525 {
1526 int retval = ERROR_FAIL;
1527 int brp_i = 0;
1528 uint32_t control;
1529 uint8_t byte_addr_select = 0x0F;
1530 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1531 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1532 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1533
1534 if (breakpoint->set) {
1535 LOG_WARNING("breakpoint already set");
1536 return retval;
1537 }
1538 /* check available context BRPs */
1539 while ((brp_i < cortex_a->brp_num) && (brp_list[brp_i].used ||
1540 (brp_list[brp_i].type != BRP_CONTEXT)))
1541 brp_i++;
1542
1543 if (brp_i >= cortex_a->brp_num) {
1544 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1545 return ERROR_FAIL;
1546 }
1547
1548 breakpoint->set = brp_i + 1;
1549 control = ((matchmode & 0x7) << 20)
1550 | (byte_addr_select << 5)
1551 | (3 << 1) | 1;
1552 brp_list[brp_i].used = 1;
1553 brp_list[brp_i].value = (breakpoint->asid);
1554 brp_list[brp_i].control = control;
1555 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1556 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1557 brp_list[brp_i].value);
1558 if (retval != ERROR_OK)
1559 return retval;
1560 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1561 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1562 brp_list[brp_i].control);
1563 if (retval != ERROR_OK)
1564 return retval;
1565 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1566 brp_list[brp_i].control,
1567 brp_list[brp_i].value);
1568 return ERROR_OK;
1569
1570 }
1571
1572 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1573 {
1574 int retval = ERROR_FAIL;
1575 int brp_1 = 0; /* holds the contextID pair */
1576 int brp_2 = 0; /* holds the IVA pair */
1577 uint32_t control_CTX, control_IVA;
1578 uint8_t CTX_byte_addr_select = 0x0F;
1579 uint8_t IVA_byte_addr_select = 0x0F;
1580 uint8_t CTX_matchmode = 0x03;
1581 uint8_t IVA_matchmode = 0x01;
1582 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1583 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1584 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1585
1586 if (breakpoint->set) {
1587 LOG_WARNING("breakpoint already set");
1588 return retval;
1589 }
1590 /* check available context BRPs */
1591 while ((brp_1 < cortex_a->brp_num) && (brp_list[brp_1].used ||
1592 (brp_list[brp_1].type != BRP_CONTEXT)))
1593 brp_1++;
1594
1595 printf("brp(CTX) found num: %d\n", brp_1);
1596 if (brp_1 >= cortex_a->brp_num) {
1597 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1598 return ERROR_FAIL;
1599 }
1600
1601 while ((brp_2 < cortex_a->brp_num) && (brp_list[brp_2].used ||
1602 (brp_list[brp_2].type != BRP_NORMAL)))
1603 brp_2++;
1604
1605 printf("brp(IVA) found num: %d\n", brp_2);
1606 if (brp_2 >= cortex_a->brp_num) {
1607 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1608 return ERROR_FAIL;
1609 }
1610
1611 breakpoint->set = brp_1 + 1;
1612 breakpoint->linked_BRP = brp_2;
1613 control_CTX = ((CTX_matchmode & 0x7) << 20)
1614 | (brp_2 << 16)
1615 | (0 << 14)
1616 | (CTX_byte_addr_select << 5)
1617 | (3 << 1) | 1;
1618 brp_list[brp_1].used = 1;
1619 brp_list[brp_1].value = (breakpoint->asid);
1620 brp_list[brp_1].control = control_CTX;
1621 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1622 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1623 brp_list[brp_1].value);
1624 if (retval != ERROR_OK)
1625 return retval;
1626 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1627 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1628 brp_list[brp_1].control);
1629 if (retval != ERROR_OK)
1630 return retval;
1631
1632 control_IVA = ((IVA_matchmode & 0x7) << 20)
1633 | (brp_1 << 16)
1634 | (IVA_byte_addr_select << 5)
1635 | (3 << 1) | 1;
1636 brp_list[brp_2].used = 1;
1637 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1638 brp_list[brp_2].control = control_IVA;
1639 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1640 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1641 brp_list[brp_2].value);
1642 if (retval != ERROR_OK)
1643 return retval;
1644 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1645 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1646 brp_list[brp_2].control);
1647 if (retval != ERROR_OK)
1648 return retval;
1649
1650 return ERROR_OK;
1651 }
1652
1653 static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1654 {
1655 int retval;
1656 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1657 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1658 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1659
1660 if (!breakpoint->set) {
1661 LOG_WARNING("breakpoint not set");
1662 return ERROR_OK;
1663 }
1664
1665 if (breakpoint->type == BKPT_HARD) {
1666 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1667 int brp_i = breakpoint->set - 1;
1668 int brp_j = breakpoint->linked_BRP;
1669 if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1670 LOG_DEBUG("Invalid BRP number in breakpoint");
1671 return ERROR_OK;
1672 }
1673 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1674 brp_list[brp_i].control, brp_list[brp_i].value);
1675 brp_list[brp_i].used = 0;
1676 brp_list[brp_i].value = 0;
1677 brp_list[brp_i].control = 0;
1678 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1679 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1680 brp_list[brp_i].control);
1681 if (retval != ERROR_OK)
1682 return retval;
1683 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1684 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1685 brp_list[brp_i].value);
1686 if (retval != ERROR_OK)
1687 return retval;
1688 if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
1689 LOG_DEBUG("Invalid BRP number in breakpoint");
1690 return ERROR_OK;
1691 }
1692 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1693 brp_list[brp_j].control, brp_list[brp_j].value);
1694 brp_list[brp_j].used = 0;
1695 brp_list[brp_j].value = 0;
1696 brp_list[brp_j].control = 0;
1697 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1698 + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
1699 brp_list[brp_j].control);
1700 if (retval != ERROR_OK)
1701 return retval;
1702 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1703 + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
1704 brp_list[brp_j].value);
1705 if (retval != ERROR_OK)
1706 return retval;
1707 breakpoint->linked_BRP = 0;
1708 breakpoint->set = 0;
1709 return ERROR_OK;
1710
1711 } else {
1712 int brp_i = breakpoint->set - 1;
1713 if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1714 LOG_DEBUG("Invalid BRP number in breakpoint");
1715 return ERROR_OK;
1716 }
1717 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1718 brp_list[brp_i].control, brp_list[brp_i].value);
1719 brp_list[brp_i].used = 0;
1720 brp_list[brp_i].value = 0;
1721 brp_list[brp_i].control = 0;
1722 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1723 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1724 brp_list[brp_i].control);
1725 if (retval != ERROR_OK)
1726 return retval;
1727 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1728 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1729 brp_list[brp_i].value);
1730 if (retval != ERROR_OK)
1731 return retval;
1732 breakpoint->set = 0;
1733 return ERROR_OK;
1734 }
1735 } else {
1736 /* restore original instruction (kept in target endianness) */
1737 if (breakpoint->length == 4) {
1738 retval = target_write_memory(target,
1739 breakpoint->address & 0xFFFFFFFE,
1740 4, 1, breakpoint->orig_instr);
1741 if (retval != ERROR_OK)
1742 return retval;
1743 } else {
1744 retval = target_write_memory(target,
1745 breakpoint->address & 0xFFFFFFFE,
1746 2, 1, breakpoint->orig_instr);
1747 if (retval != ERROR_OK)
1748 return retval;
1749 }
1750 }
1751 breakpoint->set = 0;
1752
1753 return ERROR_OK;
1754 }
1755
1756 static int cortex_a_add_breakpoint(struct target *target,
1757 struct breakpoint *breakpoint)
1758 {
1759 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1760
1761 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1762 LOG_INFO("no hardware breakpoint available");
1763 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1764 }
1765
1766 if (breakpoint->type == BKPT_HARD)
1767 cortex_a->brp_num_available--;
1768
1769 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1770 }
1771
1772 static int cortex_a_add_context_breakpoint(struct target *target,
1773 struct breakpoint *breakpoint)
1774 {
1775 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1776
1777 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1778 LOG_INFO("no hardware breakpoint available");
1779 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1780 }
1781
1782 if (breakpoint->type == BKPT_HARD)
1783 cortex_a->brp_num_available--;
1784
1785 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1786 }
1787
1788 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1789 struct breakpoint *breakpoint)
1790 {
1791 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1792
1793 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1794 LOG_INFO("no hardware breakpoint available");
1795 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1796 }
1797
1798 if (breakpoint->type == BKPT_HARD)
1799 cortex_a->brp_num_available--;
1800
1801 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1802 }
1803
1804
1805 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1806 {
1807 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1808
1809 #if 0
1810 /* It is perfectly possible to remove breakpoints while the target is running */
1811 if (target->state != TARGET_HALTED) {
1812 LOG_WARNING("target not halted");
1813 return ERROR_TARGET_NOT_HALTED;
1814 }
1815 #endif
1816
1817 if (breakpoint->set) {
1818 cortex_a_unset_breakpoint(target, breakpoint);
1819 if (breakpoint->type == BKPT_HARD)
1820 cortex_a->brp_num_available++;
1821 }
1822
1823
1824 return ERROR_OK;
1825 }
1826
1827 /*
1828 * Cortex-A Reset functions
1829 */
1830
1831 static int cortex_a_assert_reset(struct target *target)
1832 {
1833 struct armv7a_common *armv7a = target_to_armv7a(target);
1834
1835 LOG_DEBUG(" ");
1836
1837 /* FIXME when halt is requested, make it work somehow... */
1838
1839 /* Issue some kind of warm reset. */
1840 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1841 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1842 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1843 /* REVISIT handle "pulls" cases, if there's
1844 * hardware that needs them to work.
1845 */
1846 jtag_add_reset(0, 1);
1847 } else {
1848 LOG_ERROR("%s: how to reset?", target_name(target));
1849 return ERROR_FAIL;
1850 }
1851
1852 /* registers are now invalid */
1853 register_cache_invalidate(armv7a->arm.core_cache);
1854
1855 target->state = TARGET_RESET;
1856
1857 return ERROR_OK;
1858 }
1859
1860 static int cortex_a_deassert_reset(struct target *target)
1861 {
1862 int retval;
1863
1864 LOG_DEBUG(" ");
1865
1866 /* be certain SRST is off */
1867 jtag_add_reset(0, 0);
1868
1869 retval = cortex_a_poll(target);
1870 if (retval != ERROR_OK)
1871 return retval;
1872
1873 if (target->reset_halt) {
1874 if (target->state != TARGET_HALTED) {
1875 LOG_WARNING("%s: ran after reset and before halt ...",
1876 target_name(target));
1877 retval = target_halt(target);
1878 if (retval != ERROR_OK)
1879 return retval;
1880 }
1881 }
1882
1883 return ERROR_OK;
1884 }
1885
1886 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1887 {
1888 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1889 * New desired mode must be in mode. Current value of DSCR must be in
1890 * *dscr, which is updated with new value.
1891 *
1892 * This function elides actually sending the mode-change over the debug
1893 * interface if the mode is already set as desired.
1894 */
1895 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1896 if (new_dscr != *dscr) {
1897 struct armv7a_common *armv7a = target_to_armv7a(target);
1898 int retval = mem_ap_sel_write_atomic_u32(armv7a->arm.dap,
1899 armv7a->debug_ap, armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1900 if (retval == ERROR_OK)
1901 *dscr = new_dscr;
1902 return retval;
1903 } else {
1904 return ERROR_OK;
1905 }
1906 }
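/* Illustrative only (assumes the usual armv7a/swjdp locals seen in the
 * functions below): a caller reads DSCR once, then requests non-blocking
 * mode; the cached value in dscr lets cortex_a_set_dcc_mode() skip the
 * write when the mode is already set as desired. */
#if 0
uint32_t dscr;
retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
armv7a->debug_base + CPUDBG_DSCR, &dscr);
if (retval == ERROR_OK)
retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
#endif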
1907
1908 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1909 uint32_t value, uint32_t *dscr)
1910 {
1911 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1912 struct armv7a_common *armv7a = target_to_armv7a(target);
1913 struct adiv5_dap *swjdp = armv7a->arm.dap;
1914 long long then = timeval_ms();
1915 int retval;
1916
1917 while ((*dscr & mask) != value) {
1918 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1919 armv7a->debug_base + CPUDBG_DSCR, dscr);
1920 if (retval != ERROR_OK)
1921 return retval;
1922 if (timeval_ms() > then + 1000) {
1923 LOG_ERROR("timeout waiting for DSCR bit change");
1924 return ERROR_FAIL;
1925 }
1926 }
1927 return ERROR_OK;
1928 }
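/* Illustrative only: the typical wait after issuing an instruction that
 * writes DTRTX, exactly as used by cortex_a_read_copro() below. */
#if 0
retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
DSCR_DTRTX_FULL_LATCHED, &dscr);
#endif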
1929
1930 static int cortex_a_read_copro(struct target *target, uint32_t opcode,
1931 uint32_t *data, uint32_t *dscr)
1932 {
1933 int retval;
1934 struct armv7a_common *armv7a = target_to_armv7a(target);
1935 struct adiv5_dap *swjdp = armv7a->arm.dap;
1936
1937 /* Move from coprocessor to R0. */
1938 retval = cortex_a_exec_opcode(target, opcode, dscr);
1939 if (retval != ERROR_OK)
1940 return retval;
1941
1942 /* Move from R0 to DTRTX. */
1943 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
1944 if (retval != ERROR_OK)
1945 return retval;
1946
1947 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
1948 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
1949 * must also check TXfull_l). Most of the time this will be free
1950 * because TXfull_l will be set immediately and cached in dscr. */
1951 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
1952 DSCR_DTRTX_FULL_LATCHED, dscr);
1953 if (retval != ERROR_OK)
1954 return retval;
1955
1956 /* Read the value transferred to DTRTX. */
1957 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1958 armv7a->debug_base + CPUDBG_DTRTX, data);
1959 if (retval != ERROR_OK)
1960 return retval;
1961
1962 return ERROR_OK;
1963 }
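/* Illustrative only: any CP15 register readable by MRC can be fetched
 * the same way, e.g. the MIDR (MRC p15, 0, r0, c0, c0, 0): */
#if 0
uint32_t midr;
retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 0, 0, 0),
&midr, &dscr);
#endif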
1964
1965 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
1966 uint32_t *dfsr, uint32_t *dscr)
1967 {
1968 int retval;
1969
1970 if (dfar) {
1971 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
1972 if (retval != ERROR_OK)
1973 return retval;
1974 }
1975
1976 if (dfsr) {
1977 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
1978 if (retval != ERROR_OK)
1979 return retval;
1980 }
1981
1982 return ERROR_OK;
1983 }
1984
1985 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
1986 uint32_t data, uint32_t *dscr)
1987 {
1988 int retval;
1989 struct armv7a_common *armv7a = target_to_armv7a(target);
1990 struct adiv5_dap *swjdp = armv7a->arm.dap;
1991
1992 /* Write the value into DTRRX. */
1993 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1994 armv7a->debug_base + CPUDBG_DTRRX, data);
1995 if (retval != ERROR_OK)
1996 return retval;
1997
1998 /* Move from DTRRX to R0. */
1999 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
2000 if (retval != ERROR_OK)
2001 return retval;
2002
2003 /* Move from R0 to coprocessor. */
2004 retval = cortex_a_exec_opcode(target, opcode, dscr);
2005 if (retval != ERROR_OK)
2006 return retval;
2007
2008 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2009 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2010 * check RXfull_l). Most of the time this will be free because RXfull_l
2011 * will be cleared immediately and cached in dscr. */
2012 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2013 if (retval != ERROR_OK)
2014 return retval;
2015
2016 return ERROR_OK;
2017 }
2018
2019 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2020 uint32_t dfsr, uint32_t *dscr)
2021 {
2022 int retval;
2023
2024 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2025 if (retval != ERROR_OK)
2026 return retval;
2027
2028 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2029 if (retval != ERROR_OK)
2030 return retval;
2031
2032 return ERROR_OK;
2033 }
2034
2035 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2036 {
2037 uint32_t status, upper4;
2038
2039 if (dfsr & (1 << 9)) {
2040 /* LPAE format. */
2041 status = dfsr & 0x3f;
2042 upper4 = status >> 2;
2043 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2044 return ERROR_TARGET_TRANSLATION_FAULT;
2045 else if (status == 33)
2046 return ERROR_TARGET_UNALIGNED_ACCESS;
2047 else
2048 return ERROR_TARGET_DATA_ABORT;
2049 } else {
2050 /* Normal format. */
2051 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2052 if (status == 1)
2053 return ERROR_TARGET_UNALIGNED_ACCESS;
2054 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2055 status == 9 || status == 11 || status == 13 || status == 15)
2056 return ERROR_TARGET_TRANSLATION_FAULT;
2057 else
2058 return ERROR_TARGET_DATA_ABORT;
2059 }
2060 }
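/* Worked examples (DFSR values are illustrative):
 * - dfsr = 0x00000805: bit 9 clear, so normal format;
 *   status = ((0x805 >> 6) & 0x10) | (0x805 & 0xf) = 5,
 *   which maps to ERROR_TARGET_TRANSLATION_FAULT.
 * - dfsr = 0x00000221: bit 9 set, so LPAE format;
 *   status = 0x221 & 0x3f = 33, which maps to
 *   ERROR_TARGET_UNALIGNED_ACCESS. */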
2061
2062 static int cortex_a_write_apb_ab_memory_slow(struct target *target,
2063 uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2064 {
2065 /* Writes count objects of size size from *buffer. Old value of DSCR must
2066 * be in *dscr; updated to new value. This is slow because it works for
2067 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
2068 * the address is aligned, cortex_a_write_apb_ab_memory_fast should be
2069 * preferred.
2070 * Preconditions:
2071 * - Address is in R0.
2072 * - R0 is marked dirty.
2073 */
2074 struct armv7a_common *armv7a = target_to_armv7a(target);
2075 struct adiv5_dap *swjdp = armv7a->arm.dap;
2076 struct arm *arm = &armv7a->arm;
2077 int retval;
2078
2079 /* Mark register R1 as dirty, to use for transferring data. */
2080 arm_reg_current(arm, 1)->dirty = true;
2081
2082 /* Switch to non-blocking mode if not already in that mode. */
2083 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2084 if (retval != ERROR_OK)
2085 return retval;
2086
2087 /* Go through the objects. */
2088 while (count) {
2089 /* Write the value to store into DTRRX. */
2090 uint32_t data, opcode;
2091 if (size == 1)
2092 data = *buffer;
2093 else if (size == 2)
2094 data = target_buffer_get_u16(target, buffer);
2095 else
2096 data = target_buffer_get_u32(target, buffer);
2097 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2098 armv7a->debug_base + CPUDBG_DTRRX, data);
2099 if (retval != ERROR_OK)
2100 return retval;
2101
2102 /* Transfer the value from DTRRX to R1. */
2103 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
2104 if (retval != ERROR_OK)
2105 return retval;
2106
2107 /* Write the value transferred to R1 into memory. */
2108 if (size == 1)
2109 opcode = ARMV4_5_STRB_IP(1, 0);
2110 else if (size == 2)
2111 opcode = ARMV4_5_STRH_IP(1, 0);
2112 else
2113 opcode = ARMV4_5_STRW_IP(1, 0);
2114 retval = cortex_a_exec_opcode(target, opcode, dscr);
2115 if (retval != ERROR_OK)
2116 return retval;
2117
2118 /* Check for faults and return early. */
2119 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2120 return ERROR_OK; /* A data fault is not considered a system failure. */
2121
2122 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
2123 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2124 * must also check RXfull_l). Most of the time this will be free
2125 * because RXfull_l will be cleared immediately and cached in dscr. */
2126 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2127 if (retval != ERROR_OK)
2128 return retval;
2129
2130 /* Advance. */
2131 buffer += size;
2132 --count;
2133 }
2134
2135 return ERROR_OK;
2136 }
2137
2138 static int cortex_a_write_apb_ab_memory_fast(struct target *target,
2139 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2140 {
2141 /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
2142 * in *dscr; updated to new value. This is fast but only works for
2143 * word-sized objects at aligned addresses.
2144 * Preconditions:
2145 * - Address is in R0 and must be a multiple of 4.
2146 * - R0 is marked dirty.
2147 */
2148 struct armv7a_common *armv7a = target_to_armv7a(target);
2149 struct adiv5_dap *swjdp = armv7a->arm.dap;
2150 int retval;
2151
2152 /* Switch to fast mode if not already in that mode. */
2153 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2154 if (retval != ERROR_OK)
2155 return retval;
2156
2157 /* Latch STC instruction. */
2158 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2159 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2160 if (retval != ERROR_OK)
2161 return retval;
2162
2163 /* Transfer all the data and issue all the instructions. */
2164 return mem_ap_sel_write_buf_noincr(swjdp, armv7a->debug_ap, buffer,
2165 4, count, armv7a->debug_base + CPUDBG_DTRRX);
2166 }
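/* Note: in fast mode every write to DTRRX re-issues the STC latched in
 * ITR above, so the single bulk write performs one store per word
 * without touching ITR again. */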
2167
2168 static int cortex_a_write_apb_ab_memory(struct target *target,
2169 uint32_t address, uint32_t size,
2170 uint32_t count, const uint8_t *buffer)
2171 {
2172 /* Write memory through APB-AP. */
2173 int retval, final_retval;
2174 struct armv7a_common *armv7a = target_to_armv7a(target);
2175 struct adiv5_dap *swjdp = armv7a->arm.dap;
2176 struct arm *arm = &armv7a->arm;
2177 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2178
2179 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2180 address, size, count);
2181 if (target->state != TARGET_HALTED) {
2182 LOG_WARNING("target not halted");
2183 return ERROR_TARGET_NOT_HALTED;
2184 }
2185
2186 if (!count)
2187 return ERROR_OK;
2188
2189 /* Clear any abort. */
2190 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2191 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2192 if (retval != ERROR_OK)
2193 return retval;
2194
2195 /* Read DSCR. */
2196 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2197 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2198 if (retval != ERROR_OK)
2199 return retval;
2200
2201 /* Switch to non-blocking mode if not already in that mode. */
2202 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2203 if (retval != ERROR_OK)
2204 goto out;
2205
2206 /* Mark R0 as dirty. */
2207 arm_reg_current(arm, 0)->dirty = true;
2208
2209 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2210 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2211 if (retval != ERROR_OK)
2212 goto out;
2213
2214 /* Get the memory address into R0. */
2215 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2216 armv7a->debug_base + CPUDBG_DTRRX, address);
2217 if (retval != ERROR_OK)
2218 goto out;
2219 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2220 if (retval != ERROR_OK)
2221 goto out;
2222
2223 if (size == 4 && (address % 4) == 0) {
2224 /* We are doing a word-aligned transfer, so use fast mode. */
2225 retval = cortex_a_write_apb_ab_memory_fast(target, count, buffer, &dscr);
2226 } else {
2227 /* Use slow path. */
2228 retval = cortex_a_write_apb_ab_memory_slow(target, size, count, buffer, &dscr);
2229 }
2230
2231 out:
2232 final_retval = retval;
2233
2234 /* Switch to non-blocking mode if not already in that mode. */
2235 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2236 if (final_retval == ERROR_OK)
2237 final_retval = retval;
2238
2239 /* Wait for last issued instruction to complete. */
2240 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2241 if (final_retval == ERROR_OK)
2242 final_retval = retval;
2243
2244 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2245 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2246 * check RXfull_l). Most of the time this will be free because RXfull_l
2247 * will be cleared immediately and cached in dscr. However, don't do
2248 * this if there is a fault, because then the instruction might not
2249 * have completed successfully. */
2250 if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2251 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2252 if (retval != ERROR_OK)
2253 return retval;
2254 }
2255
2256 /* If there were any sticky abort flags, clear them. */
2257 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2258 fault_dscr = dscr;
2259 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2260 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2261 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2262 } else {
2263 fault_dscr = 0;
2264 }
2265
2266 /* Handle synchronous data faults. */
2267 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2268 if (final_retval == ERROR_OK) {
2269 /* Final return value will reflect cause of fault. */
2270 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2271 if (retval == ERROR_OK) {
2272 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2273 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2274 } else
2275 final_retval = retval;
2276 }
2277 /* Fault destroyed DFAR/DFSR; restore them. */
2278 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2279 if (retval != ERROR_OK)
2280 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2281 }
2282
2283 /* Handle asynchronous data faults. */
2284 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2285 if (final_retval == ERROR_OK)
2286 /* No other error has been recorded so far, so keep this one. */
2287 final_retval = ERROR_TARGET_DATA_ABORT;
2288 }
2289
2290 /* If the DCC is nonempty, clear it. */
2291 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2292 uint32_t dummy;
2293 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2294 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2295 if (final_retval == ERROR_OK)
2296 final_retval = retval;
2297 }
2298 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2299 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2300 if (final_retval == ERROR_OK)
2301 final_retval = retval;
2302 }
2303
2304 /* Done. */
2305 return final_retval;
2306 }
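/* Illustrative only (address and data are made up): a two-word aligned
 * write takes the fast path because size == 4 and address % 4 == 0. */
#if 0
const uint8_t data[8] = { 0 };
retval = cortex_a_write_apb_ab_memory(target, 0x00001000, 4, 2, data);
#endif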
2307
2308 static int cortex_a_read_apb_ab_memory_slow(struct target *target,
2309 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2310 {
2311 /* Reads count objects of size size into *buffer. Old value of DSCR must be
2312 * in *dscr; updated to new value. This is slow because it works for
2313 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
2314 * the address is aligned, cortex_a_read_apb_ab_memory_fast should be
2315 * preferred.
2316 * Preconditions:
2317 * - Address is in R0.
2318 * - R0 is marked dirty.
2319 */
2320 struct armv7a_common *armv7a = target_to_armv7a(target);
2321 struct adiv5_dap *swjdp = armv7a->arm.dap;
2322 struct arm *arm = &armv7a->arm;
2323 int retval;
2324
2325 /* Mark register R1 as dirty, to use for transferring data. */
2326 arm_reg_current(arm, 1)->dirty = true;
2327
2328 /* Switch to non-blocking mode if not already in that mode. */
2329 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2330 if (retval != ERROR_OK)
2331 return retval;
2332
2333 /* Go through the objects. */
2334 while (count) {
2335 /* Issue a load of the appropriate size to R1. */
2336 uint32_t opcode, data;
2337 if (size == 1)
2338 opcode = ARMV4_5_LDRB_IP(1, 0);
2339 else if (size == 2)
2340 opcode = ARMV4_5_LDRH_IP(1, 0);
2341 else
2342 opcode = ARMV4_5_LDRW_IP(1, 0);
2343 retval = cortex_a_exec_opcode(target, opcode, dscr);
2344 if (retval != ERROR_OK)
2345 return retval;
2346
2347 /* Issue a write of R1 to DTRTX. */
2348 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2349 if (retval != ERROR_OK)
2350 return retval;
2351
2352 /* Check for faults and return early. */
2353 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2354 return ERROR_OK; /* A data fault is not considered a system failure. */
2355
2356 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2357 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2358 * must also check TXfull_l). Most of the time this will be free
2359 * because TXfull_l will be set immediately and cached in dscr. */
2360 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2361 DSCR_DTRTX_FULL_LATCHED, dscr);
2362 if (retval != ERROR_OK)
2363 return retval;
2364
2365 /* Read the value transferred to DTRTX into the buffer. */
2366 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2367 armv7a->debug_base + CPUDBG_DTRTX, &data);
2368 if (retval != ERROR_OK)
2369 return retval;
2370 if (size == 1)
2371 *buffer = (uint8_t) data;
2372 else if (size == 2)
2373 target_buffer_set_u16(target, buffer, (uint16_t) data);
2374 else
2375 target_buffer_set_u32(target, buffer, data);
2376
2377 /* Advance. */
2378 buffer += size;
2379 --count;
2380 }
2381
2382 return ERROR_OK;
2383 }
2384
2385 static int cortex_a_read_apb_ab_memory_fast(struct target *target,
2386 uint32_t count, uint8_t *buffer, uint32_t *dscr)
2387 {
2388 /* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
2389 * *dscr; updated to new value. This is fast but only works for word-sized
2390 * objects at aligned addresses.
2391 * Preconditions:
2392 * - Address is in R0 and must be a multiple of 4.
2393 * - R0 is marked dirty.
2394 */
2395 struct armv7a_common *armv7a = target_to_armv7a(target);
2396 struct adiv5_dap *swjdp = armv7a->arm.dap;
2397 uint32_t new_dscr, u32;
2398 int retval;
2399
2400 /* Switch to non-blocking mode if not already in that mode. */
2401 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2402 if (retval != ERROR_OK)
2403 return retval;
2404
2405 if (count > 1) {
2406 /* Consecutively issue the LDC instruction via a write to ITR and
2407 * change to fast mode, in a single bulk copy since DSCR == ITR + 4.
2408 * The instruction is issued into the core before the mode switch. */
2409 uint8_t command[8];
2410 target_buffer_set_u32(target, command, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
2411 new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_FAST_MODE;
2412 target_buffer_set_u32(target, command + 4, new_dscr);
2413 retval = mem_ap_sel_write_buf(swjdp, armv7a->debug_ap, command, 4, 2,
2414 armv7a->debug_base + CPUDBG_ITR);
2415 if (retval != ERROR_OK)
2416 return retval;
2417 *dscr = new_dscr;
2418
2419 /* Read the value transferred to DTRTX into the buffer. Due to fast
2420 * mode rules, this blocks until the instruction finishes executing and
2421 * then reissues the read instruction to read the next word from
2422 * memory. The last read of DTRTX in this call reads the second-to-last
2423 * word from memory and issues the read instruction for the last word.
2424 */
2425 retval = mem_ap_sel_read_buf_noincr(swjdp, armv7a->debug_ap, buffer,
2426 4, count - 1, armv7a->debug_base + CPUDBG_DTRTX);
2427 if (retval != ERROR_OK)
2428 return retval;
2429
2430 /* Advance. */
2431 buffer += (count - 1) * 4;
2432 } else {
2433 /* Issue the LDC instruction via a write to ITR. */
2434 retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
2435 if (retval != ERROR_OK)
2436 return retval;
2437 }
2438
2439 /* Switch to non-blocking mode if not already in that mode. */
2440 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2441 if (retval != ERROR_OK)
2442 return retval;
2443
2444 /* Wait for last issued instruction to complete. */
2445 retval = cortex_a_wait_instrcmpl(target, dscr, false);
2446 if (retval != ERROR_OK)
2447 return retval;
2448
2449 /* Check for faults and return early. */
2450 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2451 return ERROR_OK; /* A data fault is not considered a system failure. */
2452
2453 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
2454 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2455 * check TXfull_l). Most of the time this will be free because TXfull_l
2456 * will be set immediately and cached in dscr. */
2457 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2458 DSCR_DTRTX_FULL_LATCHED, dscr);
2459 if (retval != ERROR_OK)
2460 return retval;
2461
2462 /* Read the value transferred to DTRTX into the buffer. This is the last
2463 * word. */
2464 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2465 armv7a->debug_base + CPUDBG_DTRTX, &u32);
2466 if (retval != ERROR_OK)
2467 return retval;
2468 target_buffer_set_u32(target, buffer, u32);
2469
2470 return ERROR_OK;
2471 }
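/* Illustrative only: with count == 3, the bulk DTRTX read above returns
 * words 1 and 2; reading word 2 re-issues the LDC for word 3, which the
 * final atomic DTRTX read then drains. */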
2472
2473 static int cortex_a_read_apb_ab_memory(struct target *target,
2474 uint32_t address, uint32_t size,
2475 uint32_t count, uint8_t *buffer)
2476 {
2477 /* Read memory through APB-AP. */
2478 int retval, final_retval;
2479 struct armv7a_common *armv7a = target_to_armv7a(target);
2480 struct adiv5_dap *swjdp = armv7a->arm.dap;
2481 struct arm *arm = &armv7a->arm;
2482 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2483
2484 LOG_DEBUG("Reading APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2485 address, size, count);
2486 if (target->state != TARGET_HALTED) {
2487 LOG_WARNING("target not halted");
2488 return ERROR_TARGET_NOT_HALTED;
2489 }
2490
2491 if (!count)
2492 return ERROR_OK;
2493
2494 /* Clear any abort. */
2495 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2496 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2497 if (retval != ERROR_OK)
2498 return retval;
2499
2500 /* Read DSCR */
2501 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2502 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2503 if (retval != ERROR_OK)
2504 return retval;
2505
2506 /* Switch to non-blocking mode if not already in that mode. */
2507 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2508 if (retval != ERROR_OK)
2509 goto out;
2510
2511 /* Mark R0 as dirty. */
2512 arm_reg_current(arm, 0)->dirty = true;
2513
2514 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2515 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2516 if (retval != ERROR_OK)
2517 goto out;
2518
2519 /* Get the memory address into R0. */
2520 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2521 armv7a->debug_base + CPUDBG_DTRRX, address);
2522 if (retval != ERROR_OK)
2523 goto out;
2524 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2525 if (retval != ERROR_OK)
2526 goto out;
2527
2528 if (size == 4 && (address % 4) == 0) {
2529 /* We are doing a word-aligned transfer, so use fast mode. */
2530 retval = cortex_a_read_apb_ab_memory_fast(target, count, buffer, &dscr);
2531 } else {
2532 /* Use slow path. */
2533 retval = cortex_a_read_apb_ab_memory_slow(target, size, count, buffer, &dscr);
2534 }
2535
2536 out:
2537 final_retval = retval;
2538
2539 /* Switch to non-blocking mode if not already in that mode. */
2540 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2541 if (final_retval == ERROR_OK)
2542 final_retval = retval;
2543
2544 /* Wait for last issued instruction to complete. */
2545 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2546 if (final_retval == ERROR_OK)
2547 final_retval = retval;
2548
2549 /* If there were any sticky abort flags, clear them. */
2550 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2551 fault_dscr = dscr;
2552 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2553 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2554 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2555 } else {
2556 fault_dscr = 0;
2557 }
2558
2559 /* Handle synchronous data faults. */
2560 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2561 if (final_retval == ERROR_OK) {
2562 /* Final return value will reflect cause of fault. */
2563 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2564 if (retval == ERROR_OK) {
2565 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2566 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2567 } else
2568 final_retval = retval;
2569 }
2570 /* Fault destroyed DFAR/DFSR; restore them. */
2571 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2572 if (retval != ERROR_OK)
2573 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2574 }
2575
2576 /* Handle asynchronous data faults. */
2577 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2578 if (final_retval == ERROR_OK)
2579 /* No other error has been recorded so far, so keep this one. */
2580 final_retval = ERROR_TARGET_DATA_ABORT;
2581 }
2582
2583 /* If the DCC is nonempty, clear it. */
2584 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2585 uint32_t dummy;
2586 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2587 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2588 if (final_retval == ERROR_OK)
2589 final_retval = retval;
2590 }
2591 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2592 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2593 if (final_retval == ERROR_OK)
2594 final_retval = retval;
2595 }
2596
2597 /* Done. */
2598 return final_retval;
2599 }
2600
2601
2602 /*
2603 * Cortex-A Memory access
2604 *
2605 * This is the same as for the Cortex-M3, but we must also use
2606 * the correct AP number for every access.
2607 */
2608
2609 static int cortex_a_read_phys_memory(struct target *target,
2610 uint32_t address, uint32_t size,
2611 uint32_t count, uint8_t *buffer)
2612 {
2613 struct armv7a_common *armv7a = target_to_armv7a(target);
2614 struct adiv5_dap *swjdp = armv7a->arm.dap;
2615 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2616 uint8_t apsel = swjdp->apsel;
2617 LOG_DEBUG("Reading memory at real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32,
2618 address, size, count);
2619
2620 if (count && buffer) {
2621
2622 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2623
2624 /* read memory through AHB-AP */
2625 retval = mem_ap_sel_read_buf(swjdp, armv7a->memory_ap, buffer, size, count, address);
2626 } else {
2627
2628 /* read memory through APB-AP */
2629 if (!armv7a->is_armv7r) {
2630 /* disable mmu */
2631 retval = cortex_a_mmu_modify(target, 0);
2632 if (retval != ERROR_OK)
2633 return retval;
2634 }
2635 retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
2636 }
2637 }
2638 return retval;
2639 }
2640
2641 static int cortex_a_read_memory(struct target *target, uint32_t address,
2642 uint32_t size, uint32_t count, uint8_t *buffer)
2643 {
2644 int mmu_enabled = 0;
2645 uint32_t virt, phys;
2646 int retval;
2647 struct armv7a_common *armv7a = target_to_armv7a(target);
2648 struct adiv5_dap *swjdp = armv7a->arm.dap;
2649 uint8_t apsel = swjdp->apsel;
2650
2651 /* cortex_a handles unaligned memory access */
2652 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2653 size, count);
2654
2655 /* determine if MMU was enabled on target stop */
2656 if (!armv7a->is_armv7r) {
2657 retval = cortex_a_mmu(target, &mmu_enabled);
2658 if (retval != ERROR_OK)
2659 return retval;
2660 }
2661
2662 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2663 if (mmu_enabled) {
2664 virt = address;
2665 retval = cortex_a_virt2phys(target, virt, &phys);
2666 if (retval != ERROR_OK)
2667 return retval;
2668
2669 LOG_DEBUG("Reading at virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2670 virt, phys);
2671 address = phys;
2672 }
2673 retval = cortex_a_read_phys_memory(target, address, size,
2674 count, buffer);
2675 } else {
2676 if (mmu_enabled) {
2677 retval = cortex_a_check_address(target, address);
2678 if (retval != ERROR_OK)
2679 return retval;
2680 /* enable MMU as we could have disabled it for phys access */
2681 retval = cortex_a_mmu_modify(target, 1);
2682 if (retval != ERROR_OK)
2683 return retval;
2684 }
2685 retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
2686 }
2687 return retval;
2688 }
2689
2690 static int cortex_a_write_phys_memory(struct target *target,
2691 uint32_t address, uint32_t size,
2692 uint32_t count, const uint8_t *buffer)
2693 {
2694 struct armv7a_common *armv7a = target_to_armv7a(target);
2695 struct adiv5_dap *swjdp = armv7a->arm.dap;
2696 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2697 uint8_t apsel = swjdp->apsel;
2698
2699 LOG_DEBUG("Writing memory to real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2700 size, count);
2701
2702 if (count && buffer) {
2703
2704 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2705
2706 /* write memory through AHB-AP */
2707 retval = mem_ap_sel_write_buf(swjdp, armv7a->memory_ap, buffer, size, count, address);
2708 } else {
2709
2710 /* write memory through APB-AP */
2711 if (!armv7a->is_armv7r) {
2712 retval = cortex_a_mmu_modify(target, 0);
2713 if (retval != ERROR_OK)
2714 return retval;
2715 }
2716 return cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
2717 }
2718 }
2719
2720
2721 /* REVISIT this op is generic ARMv7-A/R stuff */
2722 if (retval == ERROR_OK && target->state == TARGET_HALTED) {
2723 struct arm_dpm *dpm = armv7a->arm.dpm;
2724
2725 retval = dpm->prepare(dpm);
2726 if (retval != ERROR_OK)
2727 return retval;
2728
2729 /* The Cache handling will NOT work with MMU active, the
2730 * wrong addresses will be invalidated!
2731 *
2732 * For both ICache and DCache, walk all cache lines in the
2733 * address range. Cortex-A has fixed 64 byte line length.
2734 *
2735 * REVISIT per ARMv7, these may trigger watchpoints ...
2736 */
2737
2738 /* invalidate I-Cache */
2739 if (armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled) {
2740 /* ICIMVAU - Invalidate Cache single entry
2741 * with MVA to PoU
2742 * MCR p15, 0, r0, c7, c5, 1
2743 */
2744 for (uint32_t cacheline = 0;
2745 cacheline < size * count;
2746 cacheline += 64) {
2747 retval = dpm->instr_write_data_r0(dpm,
2748 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2749 address + cacheline);
2750 if (retval != ERROR_OK)
2751 return retval;
2752 }
2753 }
2754
2755 /* invalidate D-Cache */
2756 if (armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled) {
2757 /* DCIMVAC - Invalidate data Cache line
2758 * with MVA to PoC
2759 * MCR p15, 0, r0, c7, c6, 1
2760 */
2761 for (uint32_t cacheline = 0;
2762 cacheline < size * count;
2763 cacheline += 64) {
2764 retval = dpm->instr_write_data_r0(dpm,
2765 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2766 address + cacheline);
2767 if (retval != ERROR_OK)
2768 return retval;
2769 }
2770 }
2771
2772 /* (void) */ dpm->finish(dpm);
2773 }
2774
2775 return retval;
2776 }
2777
2778 static int cortex_a_write_memory(struct target *target, uint32_t address,
2779 uint32_t size, uint32_t count, const uint8_t *buffer)
2780 {
2781 int mmu_enabled = 0;
2782 uint32_t virt, phys;
2783 int retval;
2784 struct armv7a_common *armv7a = target_to_armv7a(target);
2785 struct adiv5_dap *swjdp = armv7a->arm.dap;
2786 uint8_t apsel = swjdp->apsel;
2787
2788 /* cortex_a handles unaligned memory access */
2789 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2790 size, count);
2791
2792 /* determine if MMU was enabled on target stop */
2793 if (!armv7a->is_armv7r) {
2794 retval = cortex_a_mmu(target, &mmu_enabled);
2795 if (retval != ERROR_OK)
2796 return retval;
2797 }
2798
2799 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2800 LOG_DEBUG("Writing memory to address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address, size,
2801 count);
2802 if (mmu_enabled) {
2803 virt = address;
2804 retval = cortex_a_virt2phys(target, virt, &phys);
2805 if (retval != ERROR_OK)
2806 return retval;
2807
2808 LOG_DEBUG("Writing to virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2809 virt,
2810 phys);
2811 address = phys;
2812 }
2813 retval = cortex_a_write_phys_memory(target, address, size,
2814 count, buffer);
2815 } else {
2816 if (mmu_enabled) {
2817 retval = cortex_a_check_address(target, address);
2818 if (retval != ERROR_OK)
2819 return retval;
2820 /* enable MMU as we could have disabled it for phys access */
2821 retval = cortex_a_mmu_modify(target, 1);
2822 if (retval != ERROR_OK)
2823 return retval;
2824 }
2825 retval = cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
2826 }
2827 return retval;
2828 }
2829
2830 static int cortex_a_handle_target_request(void *priv)
2831 {
2832 struct target *target = priv;
2833 struct armv7a_common *armv7a = target_to_armv7a(target);
2834 struct adiv5_dap *swjdp = armv7a->arm.dap;
2835 int retval;
2836
2837 if (!target_was_examined(target))
2838 return ERROR_OK;
2839 if (!target->dbg_msg_enabled)
2840 return ERROR_OK;
2841
2842 if (target->state == TARGET_RUNNING) {
2843 uint32_t request;
2844 uint32_t dscr;
2845 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2846 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2847
2848 /* check if we have data */
2849 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2850 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2851 armv7a->debug_base + CPUDBG_DTRTX, &request);
2852 if (retval == ERROR_OK) {
2853 target_request(target, request);
2854 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2855 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2856 }
2857 }
2858 }
2859
2860 return ERROR_OK;
2861 }
2862
2863 /*
2864 * Cortex-A target information and configuration
2865 */
2866
2867 static int cortex_a_examine_first(struct target *target)
2868 {
2869 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2870 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2871 struct adiv5_dap *swjdp = armv7a->arm.dap;
2872 int i;
2873 int retval = ERROR_OK;
2874 uint32_t didr, ctypr, ttypr, cpuid, dbg_osreg;
2875
2876 /* We call ahbap_debugport_init(swjdp); it performs the extra
2877 * read needed to ensure the DAP is configured.
2878 */
2879 retval = ahbap_debugport_init(swjdp);
2880 if (retval != ERROR_OK)
2881 return retval;
2882
2883 /* Search for the APB-AP - it is needed for access to debug registers */
2884 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2885 if (retval != ERROR_OK) {
2886 LOG_ERROR("Could not find APB-AP for debug access");
2887 return retval;
2888 }
2889 /* Search for the AHB-AP */
2890 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
2891 if (retval != ERROR_OK) {
2892 /* AHB-AP not found - use APB-AP */
2893 LOG_DEBUG("Could not find AHB-AP - using APB-AP for memory access");
2894 armv7a->memory_ap_available = false;
2895 } else {
2896 armv7a->memory_ap_available = true;
2897 }
2898
2899
2900 if (!target->dbgbase_set) {
2901 uint32_t dbgbase;
2902 /* Get ROM Table base */
2903 uint32_t apid;
2904 int32_t coreidx = target->coreid;
2905 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2906 target->cmd_name);
2907 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
2908 if (retval != ERROR_OK)
2909 return retval;
2910 /* Lookup 0x15 -- Processor DAP */
2911 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
2912 &armv7a->debug_base, &coreidx);
2913 if (retval != ERROR_OK) {
2914 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2915 target->cmd_name);
2916 return retval;
2917 }
2918 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2919 coreidx, armv7a->debug_base);
2920 } else
2921 armv7a->debug_base = target->dbgbase;
2922
2923 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2924 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2925 if (retval != ERROR_OK)
2926 return retval;
2927
2928 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2929 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2930 if (retval != ERROR_OK) {
2931 LOG_DEBUG("Examine %s failed", "CPUID");
2932 return retval;
2933 }
2934
2935 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2936 armv7a->debug_base + CPUDBG_CTYPR, &ctypr);
2937 if (retval != ERROR_OK) {
2938 LOG_DEBUG("Examine %s failed", "CTYPR");
2939 return retval;
2940 }
2941
2942 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2943 armv7a->debug_base + CPUDBG_TTYPR, &ttypr);
2944 if (retval != ERROR_OK) {
2945 LOG_DEBUG("Examine %s failed", "TTYPR");
2946 return retval;
2947 }
2948
2949 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2950 armv7a->debug_base + CPUDBG_DIDR, &didr);
2951 if (retval != ERROR_OK) {
2952 LOG_DEBUG("Examine %s failed", "DIDR");
2953 return retval;
2954 }
2955
2956 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2957 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2958 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2959 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2960
2961 cortex_a->cpuid = cpuid;
2962 cortex_a->ctypr = ctypr;
2963 cortex_a->ttypr = ttypr;
2964 cortex_a->didr = didr;
2965
2966 /* Unlocking the debug registers */
2967 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
2968 CORTEX_A15_PARTNUM) {
2969
2970 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2971 armv7a->debug_base + CPUDBG_OSLAR,
2972 0);
2973
2974 if (retval != ERROR_OK)
2975 return retval;
2976
2977 }
2978 /* Unlocking the debug registers */
2979 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
2980 CORTEX_A7_PARTNUM) {
2981
2982 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2983 armv7a->debug_base + CPUDBG_OSLAR,
2984 0);
2985
2986 if (retval != ERROR_OK)
2987 return retval;
2988
2989 }
2990 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2991 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2992
2993 if (retval != ERROR_OK)
2994 return retval;
2995
2996 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
2997
2998 armv7a->arm.core_type = ARM_MODE_MON;
2999 retval = cortex_a_dpm_setup(cortex_a, didr);
3000 if (retval != ERROR_OK)
3001 return retval;
3002
3003 /* Setup Breakpoint Register Pairs */
3004 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3005 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
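/* For example (values are illustrative), a DIDR with bits [27:24] = 5
 * and [23:20] = 1 yields brp_num = 6 breakpoint pairs, 2 of which are
 * context-ID capable. */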
3006 cortex_a->brp_num_available = cortex_a->brp_num;
3007 free(cortex_a->brp_list);
3008 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3009 /* cortex_a->brb_enabled = ????; */
3010 for (i = 0; i < cortex_a->brp_num; i++) {
3011 cortex_a->brp_list[i].used = 0;
3012 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3013 cortex_a->brp_list[i].type = BRP_NORMAL;
3014 else
3015 cortex_a->brp_list[i].type = BRP_CONTEXT;
3016 cortex_a->brp_list[i].value = 0;
3017 cortex_a->brp_list[i].control = 0;
3018 cortex_a->brp_list[i].BRPn = i;
3019 }
3020
3021 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3022
3023 target_set_examined(target);
3024 return ERROR_OK;
3025 }
3026
3027 static int cortex_a_examine(struct target *target)
3028 {
3029 int retval = ERROR_OK;
3030
3031 /* Reestablish communication after target reset */
3032 retval = cortex_a_examine_first(target);
3033
3034 /* Configure core debug access */
3035 if (retval == ERROR_OK)
3036 retval = cortex_a_init_debug_access(target);
3037
3038 return retval;
3039 }
3040
3041 /*
3042 * Cortex-A target creation and initialization
3043 */
3044
3045 static int cortex_a_init_target(struct command_context *cmd_ctx,
3046 struct target *target)
3047 {
3048 /* examine_first() does a bunch of this */
3049 return ERROR_OK;
3050 }
3051
3052 static int cortex_a_init_arch_info(struct target *target,
3053 struct cortex_a_common *cortex_a, struct jtag_tap *tap)
3054 {
3055 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3056 struct adiv5_dap *dap = &armv7a->dap;
3057
3058 armv7a->arm.dap = dap;
3059
3060 /* Setup struct cortex_a_common */
3061 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3062 /* tap has no dap initialized yet */
3063 if (!tap->dap) {
3064 armv7a->arm.dap = dap;
3065 /* Setup struct cortex_a_common */
3066
3067 /* prepare JTAG information for the new target */
3068 cortex_a->jtag_info.tap = tap;
3069 cortex_a->jtag_info.scann_size = 4;
3070
3071 /* Leave (only) generic DAP stuff for debugport_init() */
3072 dap->jtag_info = &cortex_a->jtag_info;
3073
3074 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
3075 dap->tar_autoincr_block = (1 << 10);
3076 dap->memaccess_tck = 80;
3077 tap->dap = dap;
3078 } else
3079 armv7a->arm.dap = tap->dap;
3080
3081 cortex_a->fast_reg_read = 0;
3082
3083 /* register arch-specific functions */
3084 armv7a->examine_debug_reason = NULL;
3085
3086 armv7a->post_debug_entry = cortex_a_post_debug_entry;
3087
3088 armv7a->pre_restore_context = NULL;
3089
3090 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3091
3092
3093 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
3094
3095 /* REVISIT v7a setup should be in a v7a-specific routine */
3096 armv7a_init_arch_info(target, armv7a);
3097 target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
3098
3099 return ERROR_OK;
3100 }
3101
3102 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3103 {
3104 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3105
3106 cortex_a->armv7a_common.is_armv7r = false;
3107
3108 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3109 }
3110
3111 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3112 {
3113 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3114
3115 cortex_a->armv7a_common.is_armv7r = true;
3116
3117 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3118 }
3119
3120
3121 static int cortex_a_mmu(struct target *target, int *enabled)
3122 {
3123 if (target->state != TARGET_HALTED) {
3124 LOG_ERROR("%s: target not halted", __func__);
3125 return ERROR_TARGET_INVALID;
3126 }
3127
3128 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3129 return ERROR_OK;
3130 }
3131
3132 static int cortex_a_virt2phys(struct target *target,
3133 uint32_t virt, uint32_t *phys)
3134 {
3135 int retval = ERROR_FAIL;
3136 struct armv7a_common *armv7a = target_to_armv7a(target);
3137 struct adiv5_dap *swjdp = armv7a->arm.dap;
3138 uint8_t apsel = swjdp->apsel;
3139 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
3140 uint32_t ret;
3141 retval = armv7a_mmu_translate_va(target,
3142 virt, &ret);
3143 if (retval != ERROR_OK)
3144 goto done;
3145 *phys = ret;
3146 } else {/* use this method if armv7a->memory_ap is not selected;
3147 * the MMU must be enabled in order to get a correct translation */
3148 retval = cortex_a_mmu_modify(target, 1);
3149 if (retval != ERROR_OK)
3150 goto done;
3151 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
3152 }
3153 done:
3154 return retval;
3155 }
3156
3157 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3158 {
3159 struct target *target = get_current_target(CMD_CTX);
3160 struct armv7a_common *armv7a = target_to_armv7a(target);
3161
3162 return armv7a_handle_cache_info_command(CMD_CTX,
3163 &armv7a->armv7a_mmu.armv7a_cache);
3164 }
3165
3166
3167 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3168 {
3169 struct target *target = get_current_target(CMD_CTX);
3170 if (!target_was_examined(target)) {
3171 LOG_ERROR("target not examined yet");
3172 return ERROR_FAIL;
3173 }
3174
3175 return cortex_a_init_debug_access(target);
3176 }
3177 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
3178 {
3179 struct target *target = get_current_target(CMD_CTX);
3180 /* check target is an smp target */
3181 struct target_list *head;
3182 struct target *curr;
3183 head = target->head;
3184 target->smp = 0;
3185 if (head != (struct target_list *)NULL) {
3186 while (head != (struct target_list *)NULL) {
3187 curr = head->target;
3188 curr->smp = 0;
3189 head = head->next;
3190 }
3191 /* reset the target reported to the debugger */
3192 target->gdb_service->target = target;
3193 }
3194 return ERROR_OK;
3195 }
3196
3197 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
3198 {
3199 struct target *target = get_current_target(CMD_CTX);
3200 struct target_list *head;
3201 struct target *curr;
3202 head = target->head;
3203 if (head != (struct target_list *)NULL) {
3204 target->smp = 1;
3205 while (head != (struct target_list *)NULL) {
3206 curr = head->target;
3207 curr->smp = 1;
3208 head = head->next;
3209 }
3210 }
3211 return ERROR_OK;
3212 }
3213
3214 COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
3215 {
3216 struct target *target = get_current_target(CMD_CTX);
3217 int retval = ERROR_OK;
3218 struct target_list *head;
3219 head = target->head;
3220 if (head != (struct target_list *)NULL) {
3221 if (CMD_ARGC == 1) {
3222 int coreid = 0;
3223 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
3224 if (ERROR_OK != retval)
3225 return retval;
3226 target->gdb_service->core[1] = coreid;
3227
3228 }
3229 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
3230 , target->gdb_service->core[1]);
3231 }
3232 return ERROR_OK;
3233 }
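/* Illustrative usage: "cortex_a smp_gdb 1" binds gdb to core 1; invoked
 * with no argument, the current binding is simply printed. */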
3234
3235 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3236 {
3237 struct target *target = get_current_target(CMD_CTX);
3238 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3239
3240 static const Jim_Nvp nvp_maskisr_modes[] = {
3241 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3242 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3243 { .name = NULL, .value = -1 },
3244 };
3245 const Jim_Nvp *n;
3246
3247 if (target->state != TARGET_HALTED) {
3248 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3249 return ERROR_OK;
3250 }
3251
3252 if (CMD_ARGC > 0) {
3253 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3254 if (n->name == NULL)
3255 return ERROR_COMMAND_SYNTAX_ERROR;
3256 cortex_a->isrmasking_mode = n->value;
3257
3258 }
3259
3260 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3261 command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
3262
3263 return ERROR_OK;
3264 }
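/* Illustrative usage: "cortex_a maskisr on" masks interrupts during
 * single-step; with no argument, the current setting is printed. */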
3265
3266 static const struct command_registration cortex_a_exec_command_handlers[] = {
3267 {
3268 .name = "cache_info",
3269 .handler = cortex_a_handle_cache_info_command,
3270 .mode = COMMAND_EXEC,
3271 .help = "display information about target caches",
3272 .usage = "",
3273 },
3274 {
3275 .name = "dbginit",
3276 .handler = cortex_a_handle_dbginit_command,
3277 .mode = COMMAND_EXEC,
3278 .help = "Initialize core debug",
3279 .usage = "",
3280 },
3281 { .name = "smp_off",
3282 .handler = cortex_a_handle_smp_off_command,
3283 .mode = COMMAND_EXEC,
3284 .help = "Stop smp handling",
3285 .usage = "",},
3286 {
3287 .name = "smp_on",
3288 .handler = cortex_a_handle_smp_on_command,
3289 .mode = COMMAND_EXEC,
3290 .help = "Restart smp handling",
3291 .usage = "",
3292 },
3293 {
3294 .name = "smp_gdb",
3295 .handler = cortex_a_handle_smp_gdb_command,
3296 .mode = COMMAND_EXEC,
3297 .help = "display/fix current core played to gdb",
3298 .usage = "",
3299 },
3300 {
3301 .name = "maskisr",
3302 .handler = handle_cortex_a_mask_interrupts_command,
3303 .mode = COMMAND_EXEC,
3304 .help = "mask cortex_a interrupts",
3305 .usage = "['on'|'off']",
3306 },
3307
3308
3309 COMMAND_REGISTRATION_DONE
3310 };
3311 static const struct command_registration cortex_a_command_handlers[] = {
3312 {
3313 .chain = arm_command_handlers,
3314 },
3315 {
3316 .chain = armv7a_command_handlers,
3317 },
3318 {
3319 .name = "cortex_a",
3320 .mode = COMMAND_ANY,
3321 .help = "Cortex-A command group",
3322 .usage = "",
3323 .chain = cortex_a_exec_command_handlers,
3324 },
3325 COMMAND_REGISTRATION_DONE
3326 };
3327
3328 struct target_type cortexa_target = {
3329 .name = "cortex_a",
3330 .deprecated_name = "cortex_a8",
3331
3332 .poll = cortex_a_poll,
3333 .arch_state = armv7a_arch_state,
3334
3335 .halt = cortex_a_halt,
3336 .resume = cortex_a_resume,
3337 .step = cortex_a_step,
3338
3339 .assert_reset = cortex_a_assert_reset,
3340 .deassert_reset = cortex_a_deassert_reset,
3341
3342 /* REVISIT allow exporting VFP3 registers ... */
3343 .get_gdb_reg_list = arm_get_gdb_reg_list,
3344
3345 .read_memory = cortex_a_read_memory,
3346 .write_memory = cortex_a_write_memory,
3347
3348 .checksum_memory = arm_checksum_memory,
3349 .blank_check_memory = arm_blank_check_memory,
3350
3351 .run_algorithm = armv4_5_run_algorithm,
3352
3353 .add_breakpoint = cortex_a_add_breakpoint,
3354 .add_context_breakpoint = cortex_a_add_context_breakpoint,
3355 .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3356 .remove_breakpoint = cortex_a_remove_breakpoint,
3357 .add_watchpoint = NULL,
3358 .remove_watchpoint = NULL,
3359
3360 .commands = cortex_a_command_handlers,
3361 .target_create = cortex_a_target_create,
3362 .init_target = cortex_a_init_target,
3363 .examine = cortex_a_examine,
3364
3365 .read_phys_memory = cortex_a_read_phys_memory,
3366 .write_phys_memory = cortex_a_write_phys_memory,
3367 .mmu = cortex_a_mmu,
3368 .virt2phys = cortex_a_virt2phys,
3369 };
3370
3371 static const struct command_registration cortex_r4_exec_command_handlers[] = {
3372 {
3373 .name = "cache_info",
3374 .handler = cortex_a_handle_cache_info_command,
3375 .mode = COMMAND_EXEC,
3376 .help = "display information about target caches",
3377 .usage = "",
3378 },
3379 {
3380 .name = "dbginit",
3381 .handler = cortex_a_handle_dbginit_command,
3382 .mode = COMMAND_EXEC,
3383 .help = "Initialize core debug",
3384 .usage = "",
3385 },
3386 {
3387 .name = "maskisr",
3388 .handler = handle_cortex_a_mask_interrupts_command,
3389 .mode = COMMAND_EXEC,
3390 .help = "mask cortex_r4 interrupts",
3391 .usage = "['on'|'off']",
3392 },
3393
3394 COMMAND_REGISTRATION_DONE
3395 };
3396 static const struct command_registration cortex_r4_command_handlers[] = {
3397 {
3398 .chain = arm_command_handlers,
3399 },
3400 {
3401 .chain = armv7a_command_handlers,
3402 },
3403 {
3404 .name = "cortex_r4",
3405 .mode = COMMAND_ANY,
3406 .help = "Cortex-R4 command group",
3407 .usage = "",
3408 .chain = cortex_r4_exec_command_handlers,
3409 },
3410 COMMAND_REGISTRATION_DONE
3411 };
3412
3413 struct target_type cortexr4_target = {
3414 .name = "cortex_r4",
3415
3416 .poll = cortex_a_poll,
3417 .arch_state = armv7a_arch_state,
3418
3419 .halt = cortex_a_halt,
3420 .resume = cortex_a_resume,
3421 .step = cortex_a_step,
3422
3423 .assert_reset = cortex_a_assert_reset,
3424 .deassert_reset = cortex_a_deassert_reset,
3425
3426 /* REVISIT allow exporting VFP3 registers ... */
3427 .get_gdb_reg_list = arm_get_gdb_reg_list,
3428
3429 .read_memory = cortex_a_read_memory,
3430 .write_memory = cortex_a_write_memory,
3431
3432 .checksum_memory = arm_checksum_memory,
3433 .blank_check_memory = arm_blank_check_memory,
3434
3435 .run_algorithm = armv4_5_run_algorithm,
3436
3437 .add_breakpoint = cortex_a_add_breakpoint,
3438 .add_context_breakpoint = cortex_a_add_context_breakpoint,
3439 .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3440 .remove_breakpoint = cortex_a_remove_breakpoint,
3441 .add_watchpoint = NULL,
3442 .remove_watchpoint = NULL,
3443
3444 .commands = cortex_r4_command_handlers,
3445 .target_create = cortex_r4_target_create,
3446 .init_target = cortex_a_init_target,
3447 .examine = cortex_a_examine,
3448 };
