Cortex-A: Don't flush the data/unified cache if MMU is off
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program; if not, write to the *
38 * Free Software Foundation, Inc., *
39 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
40 * *
41 * Cortex-A8(tm) TRM, ARM DDI 0344H *
42 * Cortex-A9(tm) TRM, ARM DDI 0407F *
43 * Cortex-A4(tm) TRM, ARM DDI 0363E *
44 * Cortex-A15(tm)TRM, ARM DDI 0438C *
45 * *
46 ***************************************************************************/
47
48 #ifdef HAVE_CONFIG_H
49 #include "config.h"
50 #endif
51
52 #include "breakpoints.h"
53 #include "cortex_a.h"
54 #include "register.h"
55 #include "target_request.h"
56 #include "target_type.h"
57 #include "arm_opcodes.h"
58 #include <helper/time_support.h>
59
60 static int cortex_a_poll(struct target *target);
61 static int cortex_a_debug_entry(struct target *target);
62 static int cortex_a_restore_context(struct target *target, bool bpwp);
63 static int cortex_a_set_breakpoint(struct target *target,
64 struct breakpoint *breakpoint, uint8_t matchmode);
65 static int cortex_a_set_context_breakpoint(struct target *target,
66 struct breakpoint *breakpoint, uint8_t matchmode);
67 static int cortex_a_set_hybrid_breakpoint(struct target *target,
68 struct breakpoint *breakpoint);
69 static int cortex_a_unset_breakpoint(struct target *target,
70 struct breakpoint *breakpoint);
71 static int cortex_a_dap_read_coreregister_u32(struct target *target,
72 uint32_t *value, int regnum);
73 static int cortex_a_dap_write_coreregister_u32(struct target *target,
74 uint32_t value, int regnum);
75 static int cortex_a_mmu(struct target *target, int *enabled);
76 static int cortex_a_virt2phys(struct target *target,
77 uint32_t virt, uint32_t *phys);
78 static int cortex_a_read_apb_ab_memory(struct target *target,
79 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
80
81
82 /* restore cp15_control_reg at resume */
83 static int cortex_a_restore_cp15_control_reg(struct target *target)
84 {
85 int retval = ERROR_OK;
86 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
87 struct armv7a_common *armv7a = target_to_armv7a(target);
88
89 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
90 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
91 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
92 retval = armv7a->arm.mcr(target, 15,
93 0, 0, /* op1, op2 */
94 1, 0, /* CRn, CRm */
95 cortex_a->cp15_control_reg);
96 }
97 return retval;
98 }
99
/* check address before cortex_a_apb read write access with mmu on
 * remove apb predictible data abort
 *
 * Sanity-checks a virtual address against the user/kernel split
 * (armv7a_mmu.os_border) before an APB-routed memory access, and switches
 * the DPM between SVC and ANY mode as needed so the access is performed
 * with permissions that presumably match the address range.
 * Returns ERROR_FAIL when a userspace address is accessed while the core
 * itself sits in SVC mode (such an access would likely abort); ERROR_OK
 * otherwise. */
static int cortex_a_check_address(struct target *target, uint32_t address)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	uint32_t os_border = armv7a->armv7a_mmu.os_border;
	/* Userspace address while the target halted in supervisor mode:
	 * reject the access instead of triggering a data abort. */
	if ((address < os_border) &&
		(armv7a->arm.core_mode == ARM_MODE_SVC)) {
		LOG_ERROR("%" PRIx32 " access in userspace and target in supervisor", address);
		return ERROR_FAIL;
	}
	/* Kernel-space address but debugger not yet forced into SVC:
	 * switch the DPM to SVC and remember that choice. */
	if ((address >= os_border) &&
		(cortex_a->curr_mode != ARM_MODE_SVC)) {
		dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
		cortex_a->curr_mode = ARM_MODE_SVC;
		LOG_INFO("%" PRIx32 " access in kernel space and target not in supervisor",
			address);
		return ERROR_OK;
	}
	/* Back below the border after a forced SVC switch: undo it. */
	if ((address < os_border) &&
		(cortex_a->curr_mode == ARM_MODE_SVC)) {
		dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
		cortex_a->curr_mode = ARM_MODE_ANY;
	}
	return ERROR_OK;
}
/* modify cp15_control_reg in order to enable or disable mmu for :
 * - virt2phys address conversion
 * - read or write memory in phys or virt address
 *
 * Only the debugger-side working copy (cp15_control_reg_curr) is changed
 * and written to the core; the value saved at halt (cp15_control_reg) is
 * untouched, so the original state is restored at resume.
 * Returns ERROR_FAIL when asked to enable the MMU on a core that was
 * halted with the MMU off (no valid translation tables to rely on). */
static int cortex_a_mmu_modify(struct target *target, int enable)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval = ERROR_OK;
	if (enable) {
		/* if mmu enabled at target stop and mmu not enable */
		if (!(cortex_a->cp15_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
			return ERROR_FAIL;
		}
		/* Set SCTLR.M (bit 0) only if not already set in the working copy. */
		if (!(cortex_a->cp15_control_reg_curr & 0x1U)) {
			cortex_a->cp15_control_reg_curr |= 0x1U;
			retval = armv7a->arm.mcr(target, 15,
					0, 0,	/* op1, op2 */
					1, 0,	/* CRn, CRm */
					cortex_a->cp15_control_reg_curr);
		}
	} else {
		/* Disable path: only act if the MMU is currently on.  If the
		 * data cache (SCTLR.C, bit 2) is also on, flush it BEFORE
		 * turning it off, so dirty lines reach memory while the
		 * current translation regime is still active. */
		if ((cortex_a->cp15_control_reg_curr & 0x1U)) {
			if (cortex_a->cp15_control_reg_curr & 0x4U) {
				/* data cache is active */
				cortex_a->cp15_control_reg_curr &= ~0x4U;
				/* flush data cache armv7 function to be called */
				if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache)
					armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache(target);
			}
			/* Clear SCTLR.M and write MMU-off + cache-off state back. */
			cortex_a->cp15_control_reg_curr &= ~0x1U;
			retval = armv7a->arm.mcr(target, 15,
					0, 0,	/* op1, op2 */
					1, 0,	/* CRn, CRm */
					cortex_a->cp15_control_reg_curr);
		}
	}
	return retval;
}
166
167 /*
168 * Cortex-A Basic debug access, very low level assumes state is saved
169 */
170 static int cortex_a8_init_debug_access(struct target *target)
171 {
172 struct armv7a_common *armv7a = target_to_armv7a(target);
173 struct adiv5_dap *swjdp = armv7a->arm.dap;
174 int retval;
175
176 LOG_DEBUG(" ");
177
178 /* Unlocking the debug registers for modification
179 * The debugport might be uninitialised so try twice */
180 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
181 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
182 if (retval != ERROR_OK) {
183 /* try again */
184 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
185 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
186 if (retval == ERROR_OK)
187 LOG_USER(
188 "Locking debug access failed on first, but succeeded on second try.");
189 }
190
191 return retval;
192 }
193
194 /*
195 * Cortex-A Basic debug access, very low level assumes state is saved
196 */
197 static int cortex_a_init_debug_access(struct target *target)
198 {
199 struct armv7a_common *armv7a = target_to_armv7a(target);
200 struct adiv5_dap *swjdp = armv7a->arm.dap;
201 int retval;
202 uint32_t dbg_osreg;
203 uint32_t cortex_part_num;
204 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
205
206 LOG_DEBUG(" ");
207 cortex_part_num = (cortex_a->cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >>
208 CORTEX_A_MIDR_PARTNUM_SHIFT;
209
210 switch (cortex_part_num) {
211 case CORTEX_A7_PARTNUM:
212 case CORTEX_A15_PARTNUM:
213 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
214 armv7a->debug_base + CPUDBG_OSLSR,
215 &dbg_osreg);
216 if (retval != ERROR_OK)
217 return retval;
218
219 LOG_DEBUG("DBGOSLSR 0x%" PRIx32, dbg_osreg);
220
221 if (dbg_osreg & CPUDBG_OSLAR_LK_MASK)
222 /* Unlocking the DEBUG OS registers for modification */
223 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
224 armv7a->debug_base + CPUDBG_OSLAR,
225 0);
226 break;
227
228 case CORTEX_A5_PARTNUM:
229 case CORTEX_A8_PARTNUM:
230 case CORTEX_A9_PARTNUM:
231 default:
232 retval = cortex_a8_init_debug_access(target);
233 }
234
235 if (retval != ERROR_OK)
236 return retval;
237 /* Clear Sticky Power Down status Bit in PRSR to enable access to
238 the registers in the Core Power Domain */
239 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
240 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
241 LOG_DEBUG("target->coreid %d DBGPRSR 0x%x ", target->coreid, dbg_osreg);
242
243 if (retval != ERROR_OK)
244 return retval;
245
246 /* Enabling of instruction execution in debug mode is done in debug_entry code */
247
248 /* Resync breakpoint registers */
249
250 /* Since this is likely called from init or reset, update target state information*/
251 return cortex_a_poll(target);
252 }
253
static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
{
	/* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
	 * Writes final value of DSCR into *dscr. Pass force to force always
	 * reading DSCR at least once.
	 *
	 * @param dscr in/out: caller-known DSCR value on entry (may skip the
	 *             read when DSCR_INSTR_COMP is already set and !force);
	 *             holds the last DSCR value read on return.  Must not be NULL.
	 * @return ERROR_OK, a DAP read error, or ERROR_FAIL on 1 s timeout. */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	long long then = timeval_ms();
	while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
		/* force only guarantees the first iteration's read. */
		force = false;
		int retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for InstrCompl=1");
			return ERROR_FAIL;
		}
	}
	return ERROR_OK;
}
277
278 /* To reduce needless round-trips, pass in a pointer to the current
279 * DSCR value. Initialize it to zero if you just need to know the
280 * value on return from this function; or DSCR_INSTR_COMP if you
281 * happen to know that no instruction is pending.
282 */
283 static int cortex_a_exec_opcode(struct target *target,
284 uint32_t opcode, uint32_t *dscr_p)
285 {
286 uint32_t dscr;
287 int retval;
288 struct armv7a_common *armv7a = target_to_armv7a(target);
289 struct adiv5_dap *swjdp = armv7a->arm.dap;
290
291 dscr = dscr_p ? *dscr_p : 0;
292
293 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
294
295 /* Wait for InstrCompl bit to be set */
296 retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
297 if (retval != ERROR_OK)
298 return retval;
299
300 retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
301 armv7a->debug_base + CPUDBG_ITR, opcode);
302 if (retval != ERROR_OK)
303 return retval;
304
305 long long then = timeval_ms();
306 do {
307 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
308 armv7a->debug_base + CPUDBG_DSCR, &dscr);
309 if (retval != ERROR_OK) {
310 LOG_ERROR("Could not read DSCR register");
311 return retval;
312 }
313 if (timeval_ms() > then + 1000) {
314 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
315 return ERROR_FAIL;
316 }
317 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
318
319 if (dscr_p)
320 *dscr_p = dscr;
321
322 return retval;
323 }
324
/**************************************************************************
Read core register with very few exec_opcode, fast but needs work_area.
This can cause problems with MMU active.

Saves r0, points r0 at the scratch area at 'address', stores r1-r15 there
with a single STMIA, then reads the 15 words back through the memory AP.
regfile[0] receives the original r0; regfile[1..15] receive r1-r15.
NOTE(review): r0 is left pointing at 'address' afterwards — presumably the
caller restores the register context later; confirm against callers.
**************************************************************************/
static int cortex_a_read_regs_through_mem(struct target *target, uint32_t address,
	uint32_t *regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/* Save original r0 into regfile[0]. */
	retval = cortex_a_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	/* r0 = scratch base address. */
	retval = cortex_a_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	/* STMIA r0, {r1-r15}: register list mask 0xFFFE excludes r0. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;

	/* Read the 15 stored words (r1-r15) back via the memory AP. */
	retval = mem_ap_sel_read_buf(swjdp, armv7a->memory_ap,
			(uint8_t *)(&regfile[1]), 4, 15, address);

	return retval;
}
351
/* Read one core register through the DCC channel.
 * regnum mapping: 0-14 = r0-r14, 15 = PC (read via r0), 16 = CPSR,
 * 17 = SPSR (both read via r0).  The register value is moved into DCC
 * with "MCR p14, 0, Rn, c0, c5, 0" and then fetched from DTRTX.
 * NOTE(review): regnum > 17 silently returns ERROR_OK with *value
 * untouched — callers must pass valid numbers.
 * NOTE(review): the PC/CPSR/SPSR paths clobber r0 — presumably the
 * register cache restores it later; confirm against callers. */
static int cortex_a_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	if (reg > 17)
		return retval;

	if (reg < 15) {
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0"  0xEE00nE15 */
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else if (reg == 15) {
		/* "MOV r0, r15"; then move r0 to DCCTX */
		retval = cortex_a_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* "MRS r0, CPSR" or "MRS r0, SPSR"
		 * then move r0 to DCCTX
		 * (reg & 1: 16 -> CPSR, 17 -> SPSR)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX (1 s timeout) */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
414
/* Write one core register through the DCC channel.
 * regnum mapping: 0-14 = r0-r14, 15 = PC (via r0), 16 = CPSR, 17 = SPSR
 * (both via r0 + MSR).  The value is placed in DTRRX, then an MRC moves
 * it from DCC into the destination register.
 * NOTE(review): the Rd > 17 range check happens only after the DCCRX
 * drain and silently returns ERROR_OK — callers must pass valid numbers.
 * NOTE(review): the PC/CPSR/SPSR paths clobber r0 — presumably the
 * register cache restores it later; confirm against callers. */
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode  0xEE100E15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15) {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	} else if (Rd == 15) {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16) {
			retval = cortex_a_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
493
494 /* Write to memory mapped registers directly with no cache or mmu handling */
495 static int cortex_a_dap_write_memap_register_u32(struct target *target,
496 uint32_t address,
497 uint32_t value)
498 {
499 int retval;
500 struct armv7a_common *armv7a = target_to_armv7a(target);
501 struct adiv5_dap *swjdp = armv7a->arm.dap;
502
503 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, address, value);
504
505 return retval;
506 }
507
508 /*
509 * Cortex-A implementation of Debug Programmer's Model
510 *
511 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
512 * so there's no need to poll for it before executing an instruction.
513 *
514 * NOTE that in several of these cases the "stall" mode might be useful.
515 * It'd let us queue a few operations together... prepare/finish might
516 * be the places to enable/disable that mode.
517 */
518
/* Recover the enclosing cortex_a_common from its embedded arm_dpm member. */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
523
/* Push one word into the DCC by writing DTRRX through the debug AP. */
static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
{
	LOG_DEBUG("write DCC 0x%08" PRIx32, data);
	return mem_ap_sel_write_u32(a->armv7a_common.arm.dap,
			a->armv7a_common.debug_ap, a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
}
530
/* Pull one word out of the DCC: wait (up to 1 s) for DSCR.DTRTXfull,
 * then read DTRTX.
 * @param dscr_p optional in/out cached DSCR value; pass NULL to force the
 *               initial DSCR_INSTR_COMP default (which has DTRTXfull clear,
 *               so DSCR is read at least once). */
static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
	uint32_t *dscr_p)
{
	struct adiv5_dap *swjdp = a->armv7a_common.arm.dap;
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
566
/* DPM "prepare" hook: establish the invariant that DSCR.InstrCompl is set
 * (no instruction in flight) and DCCRX is empty before a DPM transaction. */
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	struct adiv5_dap *swjdp = a->armv7a_common.arm.dap;
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation
	 * (1 s timeout) */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... drain a stale word out of DCCRX
	 * with "MRC p14, 0, r0, c0, c5, 0" so the next write won't stall */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
604
/* DPM "finish" hook: no teardown is required on Cortex-A. */
static int cortex_a_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
610
611 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
612 uint32_t opcode, uint32_t data)
613 {
614 struct cortex_a_common *a = dpm_to_a(dpm);
615 int retval;
616 uint32_t dscr = DSCR_INSTR_COMP;
617
618 retval = cortex_a_write_dcc(a, data);
619 if (retval != ERROR_OK)
620 return retval;
621
622 return cortex_a_exec_opcode(
623 a->armv7a_common.arm.target,
624 opcode,
625 &dscr);
626 }
627
/* DPM hook: place 'data' in r0 (via DCC), then run 'opcode', which is
 * expected to take its operand from r0. */
static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);

	return retval;
}
655
/* DPM hook: issue an ISB-equivalent ("MCR p15, 0, r0, c7, c5, 4") so a
 * just-written CPSR takes effect before further instructions run. */
static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* "Prefetch flush" after modifying execution status in CPSR */
	return cortex_a_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
			&dscr);
}
666
/* DPM hook: run 'opcode', which is expected to push its result into the
 * DCC, then fetch that word into *data. */
static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	int retval;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* the opcode, writing data to DCC */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}
684
685
/* DPM hook: run 'opcode', which is expected to leave its result in r0,
 * then move r0 into the DCC and fetch the word into *data. */
static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* the opcode, writing data to R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC: "MCR p14, 0, r0, c0, c5, 0" */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}
711
/* DPM hook: program one breakpoint (index 0-15) or watchpoint (index 16-31)
 * unit by writing its value register (BVR/WVR) then its control register
 * (BCR/WCR) through the debug AP.  Uses GNU case-range syntax. */
static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t vr = a->armv7a_common.debug_base;
	uint32_t cr = a->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
		case 0 ... 15:	/* breakpoints */
			vr += CPUDBG_BVR_BASE;
			cr += CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:	/* watchpoints */
			vr += CPUDBG_WVR_BASE;
			cr += CPUDBG_WCR_BASE;
			/* re-base so the register-file offset starts at 0 */
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	/* each unit's registers are 4 bytes apart */
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	/* value first, then control (control enables the unit) */
	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
747
748 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
749 {
750 struct cortex_a_common *a = dpm_to_a(dpm);
751 uint32_t cr;
752
753 switch (index_t) {
754 case 0 ... 15:
755 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
756 break;
757 case 16 ... 31:
758 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
759 index_t -= 16;
760 break;
761 default:
762 return ERROR_FAIL;
763 }
764 cr += 4 * index_t;
765
766 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
767
768 /* clear control register */
769 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
770 }
771
/* Wire the Cortex-A implementations into the generic arm_dpm callback
 * table, then run the shared DPM setup and initialization. */
static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
{
	struct arm_dpm *dpm = &a->armv7a_common.dpm;
	int retval;

	dpm->arm = &a->armv7a_common.arm;
	dpm->didr = didr;

	dpm->prepare = cortex_a_dpm_prepare;
	dpm->finish = cortex_a_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a_bpwp_enable;
	dpm->bpwp_disable = cortex_a_bpwp_disable;

	/* initialize only after setup succeeded */
	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
799 static struct target *get_cortex_a(struct target *target, int32_t coreid)
800 {
801 struct target_list *head;
802 struct target *curr;
803
804 head = target->head;
805 while (head != (struct target_list *)NULL) {
806 curr = head->target;
807 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
808 return curr;
809 head = head->next;
810 }
811 return target;
812 }
813 static int cortex_a_halt(struct target *target);
814
815 static int cortex_a_halt_smp(struct target *target)
816 {
817 int retval = 0;
818 struct target_list *head;
819 struct target *curr;
820 head = target->head;
821 while (head != (struct target_list *)NULL) {
822 curr = head->target;
823 if ((curr != target) && (curr->state != TARGET_HALTED))
824 retval += cortex_a_halt(curr);
825 head = head->next;
826 }
827 return retval;
828 }
829
830 static int update_halt_gdb(struct target *target)
831 {
832 int retval = 0;
833 if (target->gdb_service && target->gdb_service->core[0] == -1) {
834 target->gdb_service->target = target;
835 target->gdb_service->core[0] = target->coreid;
836 retval += cortex_a_halt_smp(target);
837 }
838 return retval;
839 }
840
841 /*
842 * Cortex-A Run control
843 */
844
/* Poll DSCR to track the core's run state, driving debug-entry and
 * halted-event callbacks on a running->halted transition, and handling
 * the gdb SMP core-switch handshake. */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for later inspection (e.g. debug reason) */
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			/* entry from a "real" run state: full debug entry,
			 * SMP follow-up halt, then notify gdb */
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			/* entry from debug-running emits DEBUG_HALTED instead */
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
915
/* Request a halt via DRCR, enable halting debug mode in DSCR, and wait
 * (up to 1 s) for DSCR to report the core halted. */
static int cortex_a_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enter halting debug mode
	 * (read-modify-write DSCR to set DSCR_HALT_DBG_MODE)
	 */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	/* poll until DSCR reports the core halted, with a 1 s timeout */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_HALTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
963
/* Prepare the core to resume: pick the resume PC, restore CP15 control and
 * the full register context, and mark the target as running.
 * @param current non-zero: resume at the current PC; zero: resume at *address
 * @param address in/out resume address (updated to the effective PC)
 * @param handle_breakpoints forwarded to cortex_a_restore_context (bpwp)
 * @param debug_execution when zero, all working areas are freed first */
static int cortex_a_internal_restore(struct target *target, int current,
	uint32_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM instructions are word-aligned */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
1057
/* Actually restart a halted core: disable the ITR, issue a restart request
 * via DRCR (also clearing sticky exception flags), and poll DSCR until the
 * core reports it has restarted (1 s timeout).
 *
 * Returns ERROR_OK, a DAP access error, or ERROR_FAIL on timeout.
 */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct adiv5_dap *swjdp = arm->dap;
	int retval;
	uint32_t dscr;
	/*
	 * Restart core and wait for it to be started. Clear ITRen and sticky
	 * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* The previously-issued instruction must have completed. */
	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* Disable ITR before restarting, per the ARMv7 debug model. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	/* Request restart and clear any sticky exception flags. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Poll for the "core restarted" acknowledge, with a 1 s timeout. */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1114
1115 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
1116 {
1117 int retval = 0;
1118 struct target_list *head;
1119 struct target *curr;
1120 uint32_t address;
1121 head = target->head;
1122 while (head != (struct target_list *)NULL) {
1123 curr = head->target;
1124 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1125 /* resume current address , not in step mode */
1126 retval += cortex_a_internal_restore(curr, 1, &address,
1127 handle_breakpoints, 0);
1128 retval += cortex_a_internal_restart(curr);
1129 }
1130 head = head->next;
1131
1132 }
1133 return retval;
1134 }
1135
1136 static int cortex_a_resume(struct target *target, int current,
1137 uint32_t address, int handle_breakpoints, int debug_execution)
1138 {
1139 int retval = 0;
1140 /* dummy resume for smp toggle in order to reduce gdb impact */
1141 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1142 /* simulate a start and halt of target */
1143 target->gdb_service->target = NULL;
1144 target->gdb_service->core[0] = target->gdb_service->core[1];
1145 /* fake resume at next poll we play the target core[1], see poll*/
1146 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1147 return 0;
1148 }
1149 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1150 if (target->smp) {
1151 target->gdb_service->core[0] = -1;
1152 retval = cortex_a_restore_smp(target, handle_breakpoints);
1153 if (retval != ERROR_OK)
1154 return retval;
1155 }
1156 cortex_a_internal_restart(target);
1157
1158 if (!debug_execution) {
1159 target->state = TARGET_RUNNING;
1160 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1161 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1162 } else {
1163 target->state = TARGET_DEBUG_RUNNING;
1164 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1165 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1166 }
1167
1168 return ERROR_OK;
1169 }
1170
/* Handle entry into debug state after a halt: re-enable the ITR, determine
 * and record the debug reason (including WFAR for watchpoints), read the
 * core registers into the register cache, and run the architecture-specific
 * post_debug_entry hook (cache/MMU state snapshot).
 *
 * Returns ERROR_OK or the first error from a DAP access / register read.
 */
static int cortex_a_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
		/* Slow path: read registers one by one through the DPM. */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	else {
		/* Fast path: dump r0-r15 to a working area and read it back.
		 * NOTE(review): dead in practice since fast_reg_read is never
		 * set (see REVISIT above). */
		retval = cortex_a_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
			return retval;

		/* read Current PSR */
		retval = cortex_a_dap_read_coreregister_u32(target, &cpsr, 16);
		/* store current cpsr */
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(arm, cpsr);

		/* update cache */
		for (i = 0; i <= ARM_PC; i++) {
			reg = arm_reg_current(arm, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: undo the pipeline offset of the
		 * PC value captured via STM. */
		if (cpsr & (1 << 5)) {
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		} else {
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = arm->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1291
/* Snapshot the CP15 System Control Register (SCTLR) on debug entry and
 * derive the current MMU / cache enable state from it, so later memory
 * accesses know whether address translation and caches are active.
 */
static int cortex_a_post_debug_entry(struct target *target)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	int retval;

	/* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
	retval = armv7a->arm.mrc(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			&cortex_a->cp15_control_reg);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
	cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;

	/* Identify the cache geometry once, on first debug entry. */
	if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1)
		armv7a_identify_cache(target);

	if (armv7a->is_armv7r) {
		/* Cortex-R has an MPU, not an MMU. */
		armv7a->armv7a_mmu.mmu_enabled = 0;
	} else {
		/* SCTLR.M (bit 0): MMU enable */
		armv7a->armv7a_mmu.mmu_enabled =
			(cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
	}
	/* SCTLR.C (bit 2): data/unified cache enable */
	armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
		(cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
	/* SCTLR.I (bit 12): instruction cache enable */
	armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
		(cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
	cortex_a->curr_mode = armv7a->arm.core_mode;

	return ERROR_OK;
}
1325
1326 static int cortex_a_step(struct target *target, int current, uint32_t address,
1327 int handle_breakpoints)
1328 {
1329 struct armv7a_common *armv7a = target_to_armv7a(target);
1330 struct arm *arm = &armv7a->arm;
1331 struct breakpoint *breakpoint = NULL;
1332 struct breakpoint stepbreakpoint;
1333 struct reg *r;
1334 int retval;
1335
1336 if (target->state != TARGET_HALTED) {
1337 LOG_WARNING("target not halted");
1338 return ERROR_TARGET_NOT_HALTED;
1339 }
1340
1341 /* current = 1: continue on current pc, otherwise continue at <address> */
1342 r = arm->pc;
1343 if (!current)
1344 buf_set_u32(r->value, 0, 32, address);
1345 else
1346 address = buf_get_u32(r->value, 0, 32);
1347
1348 /* The front-end may request us not to handle breakpoints.
1349 * But since Cortex-A uses breakpoint for single step,
1350 * we MUST handle breakpoints.
1351 */
1352 handle_breakpoints = 1;
1353 if (handle_breakpoints) {
1354 breakpoint = breakpoint_find(target, address);
1355 if (breakpoint)
1356 cortex_a_unset_breakpoint(target, breakpoint);
1357 }
1358
1359 /* Setup single step breakpoint */
1360 stepbreakpoint.address = address;
1361 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1362 ? 2 : 4;
1363 stepbreakpoint.type = BKPT_HARD;
1364 stepbreakpoint.set = 0;
1365
1366 /* Break on IVA mismatch */
1367 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1368
1369 target->debug_reason = DBG_REASON_SINGLESTEP;
1370
1371 retval = cortex_a_resume(target, 1, address, 0, 0);
1372 if (retval != ERROR_OK)
1373 return retval;
1374
1375 long long then = timeval_ms();
1376 while (target->state != TARGET_HALTED) {
1377 retval = cortex_a_poll(target);
1378 if (retval != ERROR_OK)
1379 return retval;
1380 if (timeval_ms() > then + 1000) {
1381 LOG_ERROR("timeout waiting for target halt");
1382 return ERROR_FAIL;
1383 }
1384 }
1385
1386 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1387
1388 target->debug_reason = DBG_REASON_BREAKPOINT;
1389
1390 if (breakpoint)
1391 cortex_a_set_breakpoint(target, breakpoint, 0);
1392
1393 if (target->state != TARGET_HALTED)
1394 LOG_DEBUG("target stepped");
1395
1396 return ERROR_OK;
1397 }
1398
1399 static int cortex_a_restore_context(struct target *target, bool bpwp)
1400 {
1401 struct armv7a_common *armv7a = target_to_armv7a(target);
1402
1403 LOG_DEBUG(" ");
1404
1405 if (armv7a->pre_restore_context)
1406 armv7a->pre_restore_context(target);
1407
1408 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1409 }
1410
1411 /*
1412 * Cortex-A Breakpoint and watchpoint functions
1413 */
1414
1415 /* Setup hardware Breakpoint Register Pair */
1416 static int cortex_a_set_breakpoint(struct target *target,
1417 struct breakpoint *breakpoint, uint8_t matchmode)
1418 {
1419 int retval;
1420 int brp_i = 0;
1421 uint32_t control;
1422 uint8_t byte_addr_select = 0x0F;
1423 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1424 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1425 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1426
1427 if (breakpoint->set) {
1428 LOG_WARNING("breakpoint already set");
1429 return ERROR_OK;
1430 }
1431
1432 if (breakpoint->type == BKPT_HARD) {
1433 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1434 brp_i++;
1435 if (brp_i >= cortex_a->brp_num) {
1436 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1437 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1438 }
1439 breakpoint->set = brp_i + 1;
1440 if (breakpoint->length == 2)
1441 byte_addr_select = (3 << (breakpoint->address & 0x02));
1442 control = ((matchmode & 0x7) << 20)
1443 | (byte_addr_select << 5)
1444 | (3 << 1) | 1;
1445 brp_list[brp_i].used = 1;
1446 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1447 brp_list[brp_i].control = control;
1448 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1449 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1450 brp_list[brp_i].value);
1451 if (retval != ERROR_OK)
1452 return retval;
1453 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1454 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1455 brp_list[brp_i].control);
1456 if (retval != ERROR_OK)
1457 return retval;
1458 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1459 brp_list[brp_i].control,
1460 brp_list[brp_i].value);
1461 } else if (breakpoint->type == BKPT_SOFT) {
1462 uint8_t code[4];
1463 if (breakpoint->length == 2)
1464 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1465 else
1466 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1467 retval = target_read_memory(target,
1468 breakpoint->address & 0xFFFFFFFE,
1469 breakpoint->length, 1,
1470 breakpoint->orig_instr);
1471 if (retval != ERROR_OK)
1472 return retval;
1473 retval = target_write_memory(target,
1474 breakpoint->address & 0xFFFFFFFE,
1475 breakpoint->length, 1, code);
1476 if (retval != ERROR_OK)
1477 return retval;
1478 breakpoint->set = 0x11; /* Any nice value but 0 */
1479 }
1480
1481 return ERROR_OK;
1482 }
1483
1484 static int cortex_a_set_context_breakpoint(struct target *target,
1485 struct breakpoint *breakpoint, uint8_t matchmode)
1486 {
1487 int retval = ERROR_FAIL;
1488 int brp_i = 0;
1489 uint32_t control;
1490 uint8_t byte_addr_select = 0x0F;
1491 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1492 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1493 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1494
1495 if (breakpoint->set) {
1496 LOG_WARNING("breakpoint already set");
1497 return retval;
1498 }
1499 /*check available context BRPs*/
1500 while ((brp_list[brp_i].used ||
1501 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1502 brp_i++;
1503
1504 if (brp_i >= cortex_a->brp_num) {
1505 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1506 return ERROR_FAIL;
1507 }
1508
1509 breakpoint->set = brp_i + 1;
1510 control = ((matchmode & 0x7) << 20)
1511 | (byte_addr_select << 5)
1512 | (3 << 1) | 1;
1513 brp_list[brp_i].used = 1;
1514 brp_list[brp_i].value = (breakpoint->asid);
1515 brp_list[brp_i].control = control;
1516 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1517 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1518 brp_list[brp_i].value);
1519 if (retval != ERROR_OK)
1520 return retval;
1521 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1522 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1523 brp_list[brp_i].control);
1524 if (retval != ERROR_OK)
1525 return retval;
1526 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1527 brp_list[brp_i].control,
1528 brp_list[brp_i].value);
1529 return ERROR_OK;
1530
1531 }
1532
1533 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1534 {
1535 int retval = ERROR_FAIL;
1536 int brp_1 = 0; /* holds the contextID pair */
1537 int brp_2 = 0; /* holds the IVA pair */
1538 uint32_t control_CTX, control_IVA;
1539 uint8_t CTX_byte_addr_select = 0x0F;
1540 uint8_t IVA_byte_addr_select = 0x0F;
1541 uint8_t CTX_machmode = 0x03;
1542 uint8_t IVA_machmode = 0x01;
1543 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1544 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1545 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1546
1547 if (breakpoint->set) {
1548 LOG_WARNING("breakpoint already set");
1549 return retval;
1550 }
1551 /*check available context BRPs*/
1552 while ((brp_list[brp_1].used ||
1553 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1554 brp_1++;
1555
1556 printf("brp(CTX) found num: %d\n", brp_1);
1557 if (brp_1 >= cortex_a->brp_num) {
1558 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1559 return ERROR_FAIL;
1560 }
1561
1562 while ((brp_list[brp_2].used ||
1563 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1564 brp_2++;
1565
1566 printf("brp(IVA) found num: %d\n", brp_2);
1567 if (brp_2 >= cortex_a->brp_num) {
1568 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1569 return ERROR_FAIL;
1570 }
1571
1572 breakpoint->set = brp_1 + 1;
1573 breakpoint->linked_BRP = brp_2;
1574 control_CTX = ((CTX_machmode & 0x7) << 20)
1575 | (brp_2 << 16)
1576 | (0 << 14)
1577 | (CTX_byte_addr_select << 5)
1578 | (3 << 1) | 1;
1579 brp_list[brp_1].used = 1;
1580 brp_list[brp_1].value = (breakpoint->asid);
1581 brp_list[brp_1].control = control_CTX;
1582 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1583 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1584 brp_list[brp_1].value);
1585 if (retval != ERROR_OK)
1586 return retval;
1587 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1588 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1589 brp_list[brp_1].control);
1590 if (retval != ERROR_OK)
1591 return retval;
1592
1593 control_IVA = ((IVA_machmode & 0x7) << 20)
1594 | (brp_1 << 16)
1595 | (IVA_byte_addr_select << 5)
1596 | (3 << 1) | 1;
1597 brp_list[brp_2].used = 1;
1598 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1599 brp_list[brp_2].control = control_IVA;
1600 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1601 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1602 brp_list[brp_2].value);
1603 if (retval != ERROR_OK)
1604 return retval;
1605 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1606 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1607 brp_list[brp_2].control);
1608 if (retval != ERROR_OK)
1609 return retval;
1610
1611 return ERROR_OK;
1612 }
1613
/* Remove a previously-set breakpoint.
 *
 * Hardware breakpoints: clear and write back the BRP control/value
 * registers.  A hybrid breakpoint (both address and asid non-zero) owns
 * two linked BRPs and both are released; a plain one owns a single BRP.
 * Software breakpoints: write the saved original instruction back.
 */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* Hybrid breakpoint: release both the context and the IVA BRP. */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* Disable the BRP by clearing its control register first. */
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* Plain hardware breakpoint: release its single BRP. */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1716
1717 static int cortex_a_add_breakpoint(struct target *target,
1718 struct breakpoint *breakpoint)
1719 {
1720 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1721
1722 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1723 LOG_INFO("no hardware breakpoint available");
1724 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1725 }
1726
1727 if (breakpoint->type == BKPT_HARD)
1728 cortex_a->brp_num_available--;
1729
1730 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1731 }
1732
1733 static int cortex_a_add_context_breakpoint(struct target *target,
1734 struct breakpoint *breakpoint)
1735 {
1736 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1737
1738 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1739 LOG_INFO("no hardware breakpoint available");
1740 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1741 }
1742
1743 if (breakpoint->type == BKPT_HARD)
1744 cortex_a->brp_num_available--;
1745
1746 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1747 }
1748
1749 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1750 struct breakpoint *breakpoint)
1751 {
1752 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1753
1754 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1755 LOG_INFO("no hardware breakpoint available");
1756 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1757 }
1758
1759 if (breakpoint->type == BKPT_HARD)
1760 cortex_a->brp_num_available--;
1761
1762 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1763 }
1764
1765
1766 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1767 {
1768 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1769
1770 #if 0
1771 /* It is perfectly possible to remove breakpoints while the target is running */
1772 if (target->state != TARGET_HALTED) {
1773 LOG_WARNING("target not halted");
1774 return ERROR_TARGET_NOT_HALTED;
1775 }
1776 #endif
1777
1778 if (breakpoint->set) {
1779 cortex_a_unset_breakpoint(target, breakpoint);
1780 if (breakpoint->type == BKPT_HARD)
1781 cortex_a->brp_num_available++;
1782 }
1783
1784
1785 return ERROR_OK;
1786 }
1787
1788 /*
1789 * Cortex-A Reset functions
1790 */
1791
1792 static int cortex_a_assert_reset(struct target *target)
1793 {
1794 struct armv7a_common *armv7a = target_to_armv7a(target);
1795
1796 LOG_DEBUG(" ");
1797
1798 /* FIXME when halt is requested, make it work somehow... */
1799
1800 /* Issue some kind of warm reset. */
1801 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1802 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1803 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1804 /* REVISIT handle "pulls" cases, if there's
1805 * hardware that needs them to work.
1806 */
1807 jtag_add_reset(0, 1);
1808 } else {
1809 LOG_ERROR("%s: how to reset?", target_name(target));
1810 return ERROR_FAIL;
1811 }
1812
1813 /* registers are now invalid */
1814 register_cache_invalidate(armv7a->arm.core_cache);
1815
1816 target->state = TARGET_RESET;
1817
1818 return ERROR_OK;
1819 }
1820
1821 static int cortex_a_deassert_reset(struct target *target)
1822 {
1823 int retval;
1824
1825 LOG_DEBUG(" ");
1826
1827 /* be certain SRST is off */
1828 jtag_add_reset(0, 0);
1829
1830 retval = cortex_a_poll(target);
1831 if (retval != ERROR_OK)
1832 return retval;
1833
1834 if (target->reset_halt) {
1835 if (target->state != TARGET_HALTED) {
1836 LOG_WARNING("%s: ran after reset and before halt ...",
1837 target_name(target));
1838 retval = target_halt(target);
1839 if (retval != ERROR_OK)
1840 return retval;
1841 }
1842 }
1843
1844 return ERROR_OK;
1845 }
1846
1847 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1848 {
1849 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1850 * New desired mode must be in mode. Current value of DSCR must be in
1851 * *dscr, which is updated with new value.
1852 *
1853 * This function elides actually sending the mode-change over the debug
1854 * interface if the mode is already set as desired.
1855 */
1856 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1857 if (new_dscr != *dscr) {
1858 struct armv7a_common *armv7a = target_to_armv7a(target);
1859 int retval = mem_ap_sel_write_atomic_u32(armv7a->arm.dap,
1860 armv7a->debug_ap, armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1861 if (retval == ERROR_OK)
1862 *dscr = new_dscr;
1863 return retval;
1864 } else {
1865 return ERROR_OK;
1866 }
1867 }
1868
1869 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1870 uint32_t value, uint32_t *dscr)
1871 {
1872 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1873 struct armv7a_common *armv7a = target_to_armv7a(target);
1874 struct adiv5_dap *swjdp = armv7a->arm.dap;
1875 long long then = timeval_ms();
1876 int retval;
1877
1878 while ((*dscr & mask) != value) {
1879 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1880 armv7a->debug_base + CPUDBG_DSCR, dscr);
1881 if (retval != ERROR_OK)
1882 return retval;
1883 if (timeval_ms() > then + 1000) {
1884 LOG_ERROR("timeout waiting for DSCR bit change");
1885 return ERROR_FAIL;
1886 }
1887 }
1888 return ERROR_OK;
1889 }
1890
/* Read a coprocessor register: execute `opcode` (an MRC that moves the
 * value into R0), move R0 into DTRTX, wait for DTRTX to latch, and read
 * it over the DAP.  Clobbers R0; *dscr is kept up to date throughout.
 */
static int cortex_a_read_copro(struct target *target, uint32_t opcode,
	uint32_t *data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/* Move from coprocessor to R0. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
	 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
	 * must also check TXfull_l). Most of the time this will be free
	 * because TXfull_l will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX. */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
1925
1926 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
1927 uint32_t *dfsr, uint32_t *dscr)
1928 {
1929 int retval;
1930
1931 if (dfar) {
1932 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
1933 if (retval != ERROR_OK)
1934 return retval;
1935 }
1936
1937 if (dfsr) {
1938 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
1939 if (retval != ERROR_OK)
1940 return retval;
1941 }
1942
1943 return ERROR_OK;
1944 }
1945
/* Write a coprocessor register: place the value in DTRRX over the DAP,
 * move it into R0, then execute `opcode` (an MCR that moves R0 into the
 * coprocessor), and wait for DTRRX to drain.  Clobbers R0; *dscr is kept
 * up to date throughout.
 */
static int cortex_a_write_copro(struct target *target, uint32_t opcode,
	uint32_t data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/* Write the value into DTRRX. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, data);
	if (retval != ERROR_OK)
		return retval;

	/* Move from DTRRX to R0. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to coprocessor. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
1979
1980 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
1981 uint32_t dfsr, uint32_t *dscr)
1982 {
1983 int retval;
1984
1985 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
1986 if (retval != ERROR_OK)
1987 return retval;
1988
1989 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
1990 if (retval != ERROR_OK)
1991 return retval;
1992
1993 return ERROR_OK;
1994 }
1995
1996 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
1997 {
1998 uint32_t status, upper4;
1999
2000 if (dfsr & (1 << 9)) {
2001 /* LPAE format. */
2002 status = dfsr & 0x3f;
2003 upper4 = status >> 2;
2004 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2005 return ERROR_TARGET_TRANSLATION_FAULT;
2006 else if (status == 33)
2007 return ERROR_TARGET_UNALIGNED_ACCESS;
2008 else
2009 return ERROR_TARGET_DATA_ABORT;
2010 } else {
2011 /* Normal format. */
2012 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2013 if (status == 1)
2014 return ERROR_TARGET_UNALIGNED_ACCESS;
2015 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2016 status == 9 || status == 11 || status == 13 || status == 15)
2017 return ERROR_TARGET_TRANSLATION_FAULT;
2018 else
2019 return ERROR_TARGET_DATA_ABORT;
2020 }
2021 }
2022
/* Slow-path APB-AP memory write: one DCC round trip per object, so it
 * handles byte/halfword sizes and unaligned addresses at the cost of speed. */
static int cortex_a_write_apb_ab_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size size from *buffer. Old value of DSCR must
	 * be in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_write_apb_ab_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Write the value to store into DTRRX. */
		uint32_t data, opcode;
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* Transfer the value from DTRRX to R1. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Write the value transferred to R1 into memory. Note that the loop
		 * never updates R0 explicitly: the *_IP store forms are presumed to
		 * advance the address in R0 themselves. */
		if (size == 1)
			opcode = ARMV4_5_STRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_STRH_IP(1, 0);
		else
			opcode = ARMV4_5_STRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check RXfull_l). Most of the time this will be free
		 * because RXfull_l will be cleared immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2098
/* Fast-path APB-AP memory write: latch an STC in ITR once and then stream
 * words through DTRRX; in DCC fast mode each DTRRX write re-triggers the
 * latched instruction in the core. */
static int cortex_a_write_apb_ab_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size 4 from *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is fast but only works for
	 * word-sized objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	int retval;

	/* Switch to fast mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Latch STC instruction. It is not executed yet; fast mode issues it
	 * whenever DTRRX is written below. The STC writeback form is presumed to
	 * advance R0 — mirroring the LDC used by the read path. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
	if (retval != ERROR_OK)
		return retval;

	/* Transfer all the data and issue all the instructions: repeated
	 * non-incrementing writes to the single DTRRX address. */
	return mem_ap_sel_write_buf_noincr(swjdp, armv7a->debug_ap, buffer,
			4, count, armv7a->debug_base + CPUDBG_DTRRX);
}
2128
/* Write target memory through the APB-AP debug port, by executing store
 * instructions on the halted core via the DCC. Dispatches to the fast path
 * for aligned word transfers, otherwise to the slow path; afterwards it
 * drains the DCC, decodes any data abort into an error code, and restores
 * the DFAR/DFSR the abort clobbered. Requires the target to be halted. */
static int cortex_a_write_apb_ab_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* Write memory through APB-AP. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
		address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR. */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_write_apb_ab_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_write_apb_ab_memory_slow(target, size, count, buffer, &dscr);
	}

out:
	/* Cleanup runs even on failure; final_retval keeps the first error while
	 * the cleanup steps below are still attempted. */
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. However, don’t do this
	 * if there is fault, because then the instruction might not have completed
	 * successfully. */
	if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
		/* NOTE(review): unlike every other failure in this epilogue, a
		 * timeout here returns immediately, skipping the DFAR/DFSR restore
		 * and DCC drain below — confirm this asymmetry is intentional. */
		if (retval != ERROR_OK)
			return retval;
	}

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2268
/* Slow-path APB-AP memory read: one DCC round trip per object, so it
 * handles byte/halfword sizes and unaligned addresses at the cost of speed. */
static int cortex_a_read_apb_ab_memory_slow(struct target *target,
	uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size size into *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_read_apb_ab_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Issue a load of the appropriate size to R1. Note that the loop
		 * never updates R0 explicitly: the *_IP load forms are presumed to
		 * advance the address in R0 themselves. */
		uint32_t opcode, data;
		if (size == 1)
			opcode = ARMV4_5_LDRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_LDRH_IP(1, 0);
		else
			opcode = ARMV4_5_LDRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Issue a write of R1 to DTRTX. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check TXfull_l). Most of the time this will be free
		 * because TXfull_l will be set immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
				DSCR_DTRTX_FULL_LATCHED, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer, using the
		 * target's endianness for multi-byte objects. */
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &data);
		if (retval != ERROR_OK)
			return retval;
		if (size == 1)
			*buffer = (uint8_t) data;
		else if (size == 2)
			target_buffer_set_u16(target, buffer, (uint16_t) data);
		else
			target_buffer_set_u32(target, buffer, data);

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2345
/* Fast-path APB-AP memory read: issue one LDC and switch to DCC fast mode
 * in a single bulk write, then stream words out of DTRTX — each DTRTX read
 * re-triggers the latched LDC for the next word. */
static int cortex_a_read_apb_ab_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
	 * *dscr; updated to new value. This is fast but only works for word-sized
	 * objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	uint32_t new_dscr, u32;
	int retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (count > 1) {
		/* Consecutively issue the LDC instruction via a write to ITR and
		 * change to fast mode, in a single bulk copy since DSCR == ITR + 4.
		 * The instruction is issued into the core before the mode switch. */
		uint8_t command[8];
		target_buffer_set_u32(target, command, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
		new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_FAST_MODE;
		target_buffer_set_u32(target, command + 4, new_dscr);
		retval = mem_ap_sel_write_buf(swjdp, armv7a->debug_ap, command, 4, 2,
				armv7a->debug_base + CPUDBG_ITR);
		if (retval != ERROR_OK)
			return retval;
		/* Keep the cached DSCR in sync with what was just written. */
		*dscr = new_dscr;

		/* Read the value transferred to DTRTX into the buffer. Due to fast
		 * mode rules, this blocks until the instruction finishes executing and
		 * then reissues the read instruction to read the next word from
		 * memory. The last read of DTRTX in this call reads the second-to-last
		 * word from memory and issues the read instruction for the last word.
		 */
		retval = mem_ap_sel_read_buf_noincr(swjdp, armv7a->debug_ap, buffer,
				4, count - 1, armv7a->debug_base + CPUDBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += (count - 1) * 4;
	} else {
		/* Issue the LDC instruction via a write to ITR. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Switch to non-blocking mode if not already in that mode. This also
	 * leaves fast mode, so no further LDCs are reissued. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, dscr, false);
	if (retval != ERROR_OK)
		return retval;

	/* Check for faults and return early. */
	if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
		return ERROR_OK; /* A data fault is not considered a system failure. */

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check TXfull_l). Most of the time this will be free because TXfull_l
	 * will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX into the buffer. This is the last
	 * word. */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &u32);
	if (retval != ERROR_OK)
		return retval;
	target_buffer_set_u32(target, buffer, u32);

	return ERROR_OK;
}
2433
/* Read target memory through the APB-AP debug port, by executing load
 * instructions on the halted core via the DCC. Dispatches to the fast path
 * for aligned word transfers, otherwise to the slow path; afterwards it
 * drains the DCC, decodes any data abort into an error code, and restores
 * the DFAR/DFSR the abort clobbered. Requires the target to be halted. */
static int cortex_a_read_apb_ab_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through APB-AP. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
		address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_apb_ab_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_read_apb_ab_memory_slow(target, size, count, buffer, &dscr);
	}

out:
	/* Cleanup runs even on failure; final_retval keeps the first error while
	 * the cleanup steps below are still attempted. */
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2561
2562
/*
 * Cortex-A Memory access
 *
 * This is the same as for Cortex-M3, but we must also use the
 * correct AP number for every access.
 */
2569
2570 static int cortex_a_read_phys_memory(struct target *target,
2571 uint32_t address, uint32_t size,
2572 uint32_t count, uint8_t *buffer)
2573 {
2574 struct armv7a_common *armv7a = target_to_armv7a(target);
2575 struct adiv5_dap *swjdp = armv7a->arm.dap;
2576 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2577 uint8_t apsel = swjdp->apsel;
2578 LOG_DEBUG("Reading memory at real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32,
2579 address, size, count);
2580
2581 if (count && buffer) {
2582
2583 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2584
2585 /* read memory through AHB-AP */
2586 retval = mem_ap_sel_read_buf(swjdp, armv7a->memory_ap, buffer, size, count, address);
2587 } else {
2588
2589 /* read memory through APB-AP */
2590 if (!armv7a->is_armv7r) {
2591 /* disable mmu */
2592 retval = cortex_a_mmu_modify(target, 0);
2593 if (retval != ERROR_OK)
2594 return retval;
2595 }
2596 retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
2597 }
2598 }
2599 return retval;
2600 }
2601
2602 static int cortex_a_read_memory(struct target *target, uint32_t address,
2603 uint32_t size, uint32_t count, uint8_t *buffer)
2604 {
2605 int mmu_enabled = 0;
2606 uint32_t virt, phys;
2607 int retval;
2608 struct armv7a_common *armv7a = target_to_armv7a(target);
2609 struct adiv5_dap *swjdp = armv7a->arm.dap;
2610 uint8_t apsel = swjdp->apsel;
2611
2612 /* cortex_a handles unaligned memory access */
2613 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2614 size, count);
2615
2616 /* determine if MMU was enabled on target stop */
2617 if (!armv7a->is_armv7r) {
2618 retval = cortex_a_mmu(target, &mmu_enabled);
2619 if (retval != ERROR_OK)
2620 return retval;
2621 }
2622
2623 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2624 if (mmu_enabled) {
2625 virt = address;
2626 retval = cortex_a_virt2phys(target, virt, &phys);
2627 if (retval != ERROR_OK)
2628 return retval;
2629
2630 LOG_DEBUG("Reading at virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2631 virt, phys);
2632 address = phys;
2633 }
2634 retval = cortex_a_read_phys_memory(target, address, size,
2635 count, buffer);
2636 } else {
2637 if (mmu_enabled) {
2638 retval = cortex_a_check_address(target, address);
2639 if (retval != ERROR_OK)
2640 return retval;
2641 /* enable MMU as we could have disabled it for phys access */
2642 retval = cortex_a_mmu_modify(target, 1);
2643 if (retval != ERROR_OK)
2644 return retval;
2645 }
2646 retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
2647 }
2648 return retval;
2649 }
2650
/* Write memory at a physical address: directly via the AHB-AP when it is the
 * selected AP, otherwise through the core via the APB-AP with the MMU
 * switched off (ARMv7-A only). Only the AHB path performs the cache
 * maintenance below — the APB path returns early, since with the MMU just
 * disabled the data/unified cache must not be flushed (MVA-based cache
 * operations would hit the wrong lines). */
static int cortex_a_write_phys_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	uint8_t apsel = swjdp->apsel;

	LOG_DEBUG("Writing memory to real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
		size, count);

	if (count && buffer) {

		if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {

			/* write memory through AHB-AP */
			retval = mem_ap_sel_write_buf(swjdp, armv7a->memory_ap, buffer, size, count, address);
		} else {

			/* write memory through APB-AP */
			if (!armv7a->is_armv7r) {
				/* Disable the MMU so the address is treated as physical. */
				retval = cortex_a_mmu_modify(target, 0);
				if (retval != ERROR_OK)
					return retval;
			}
			/* Return directly: skip the cache maintenance below, which is
			 * only valid for the AHB path (see function header). */
			return cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
		}
	}


	/* REVISIT this op is generic ARMv7-A/R stuff */
	if (retval == ERROR_OK && target->state == TARGET_HALTED) {
		struct arm_dpm *dpm = armv7a->arm.dpm;

		retval = dpm->prepare(dpm);
		if (retval != ERROR_OK)
			return retval;

		/* The Cache handling will NOT work with MMU active, the
		 * wrong addresses will be invalidated!
		 *
		 * For both ICache and DCache, walk all cache lines in the
		 * address range. Cortex-A has fixed 64 byte line length.
		 *
		 * REVISIT per ARMv7, these may trigger watchpoints ...
		 */

		/* invalidate I-Cache */
		if (armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled) {
			/* ICIMVAU - Invalidate Cache single entry
			 * with MVA to PoU
			 *      MCR p15, 0, r0, c7, c5, 1
			 */
			for (uint32_t cacheline = 0;
				cacheline < size * count;
				cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
						address + cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* invalidate D-Cache */
		if (armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled) {
			/* DCIMVAC - Invalidate data Cache line
			 * with MVA to PoC
			 *      MCR p15, 0, r0, c7, c6, 1
			 */
			for (uint32_t cacheline = 0;
				cacheline < size * count;
				cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
						address + cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* (void) */ dpm->finish(dpm);
	}

	return retval;
}
2738
2739 static int cortex_a_write_memory(struct target *target, uint32_t address,
2740 uint32_t size, uint32_t count, const uint8_t *buffer)
2741 {
2742 int mmu_enabled = 0;
2743 uint32_t virt, phys;
2744 int retval;
2745 struct armv7a_common *armv7a = target_to_armv7a(target);
2746 struct adiv5_dap *swjdp = armv7a->arm.dap;
2747 uint8_t apsel = swjdp->apsel;
2748
2749 /* cortex_a handles unaligned memory access */
2750 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2751 size, count);
2752
2753 /* determine if MMU was enabled on target stop */
2754 if (!armv7a->is_armv7r) {
2755 retval = cortex_a_mmu(target, &mmu_enabled);
2756 if (retval != ERROR_OK)
2757 return retval;
2758 }
2759
2760 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2761 LOG_DEBUG("Writing memory to address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address, size,
2762 count);
2763 if (mmu_enabled) {
2764 virt = address;
2765 retval = cortex_a_virt2phys(target, virt, &phys);
2766 if (retval != ERROR_OK)
2767 return retval;
2768
2769 LOG_DEBUG("Writing to virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2770 virt,
2771 phys);
2772 address = phys;
2773 }
2774 retval = cortex_a_write_phys_memory(target, address, size,
2775 count, buffer);
2776 } else {
2777 if (mmu_enabled) {
2778 retval = cortex_a_check_address(target, address);
2779 if (retval != ERROR_OK)
2780 return retval;
2781 /* enable MMU as we could have disabled it for phys access */
2782 retval = cortex_a_mmu_modify(target, 1);
2783 if (retval != ERROR_OK)
2784 return retval;
2785 }
2786 retval = cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
2787 }
2788 return retval;
2789 }
2790
2791 static int cortex_a_handle_target_request(void *priv)
2792 {
2793 struct target *target = priv;
2794 struct armv7a_common *armv7a = target_to_armv7a(target);
2795 struct adiv5_dap *swjdp = armv7a->arm.dap;
2796 int retval;
2797
2798 if (!target_was_examined(target))
2799 return ERROR_OK;
2800 if (!target->dbg_msg_enabled)
2801 return ERROR_OK;
2802
2803 if (target->state == TARGET_RUNNING) {
2804 uint32_t request;
2805 uint32_t dscr;
2806 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2807 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2808
2809 /* check if we have data */
2810 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2811 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2812 armv7a->debug_base + CPUDBG_DTRTX, &request);
2813 if (retval == ERROR_OK) {
2814 target_request(target, request);
2815 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2816 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2817 }
2818 }
2819 }
2820
2821 return ERROR_OK;
2822 }
2823
2824 /*
2825 * Cortex-A target information and configuration
2826 */
2827
2828 static int cortex_a_examine_first(struct target *target)
2829 {
2830 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2831 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2832 struct adiv5_dap *swjdp = armv7a->arm.dap;
2833 int i;
2834 int retval = ERROR_OK;
2835 uint32_t didr, ctypr, ttypr, cpuid, dbg_osreg;
2836
2837 /* We do one extra read to ensure DAP is configured,
2838 * we call ahbap_debugport_init(swjdp) instead
2839 */
2840 retval = ahbap_debugport_init(swjdp);
2841 if (retval != ERROR_OK)
2842 return retval;
2843
2844 /* Search for the APB-AB - it is needed for access to debug registers */
2845 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2846 if (retval != ERROR_OK) {
2847 LOG_ERROR("Could not find APB-AP for debug access");
2848 return retval;
2849 }
2850 /* Search for the AHB-AB */
2851 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
2852 if (retval != ERROR_OK) {
2853 /* AHB-AP not found - use APB-AP */
2854 LOG_DEBUG("Could not find AHB-AP - using APB-AP for memory access");
2855 armv7a->memory_ap_available = false;
2856 } else {
2857 armv7a->memory_ap_available = true;
2858 }
2859
2860
2861 if (!target->dbgbase_set) {
2862 uint32_t dbgbase;
2863 /* Get ROM Table base */
2864 uint32_t apid;
2865 int32_t coreidx = target->coreid;
2866 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2867 target->cmd_name);
2868 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
2869 if (retval != ERROR_OK)
2870 return retval;
2871 /* Lookup 0x15 -- Processor DAP */
2872 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
2873 &armv7a->debug_base, &coreidx);
2874 if (retval != ERROR_OK)
2875 return retval;
2876 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2877 coreidx, armv7a->debug_base);
2878 } else
2879 armv7a->debug_base = target->dbgbase;
2880
2881 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2882 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2883 if (retval != ERROR_OK)
2884 return retval;
2885
2886 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2887 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2888 if (retval != ERROR_OK) {
2889 LOG_DEBUG("Examine %s failed", "CPUID");
2890 return retval;
2891 }
2892
2893 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2894 armv7a->debug_base + CPUDBG_CTYPR, &ctypr);
2895 if (retval != ERROR_OK) {
2896 LOG_DEBUG("Examine %s failed", "CTYPR");
2897 return retval;
2898 }
2899
2900 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2901 armv7a->debug_base + CPUDBG_TTYPR, &ttypr);
2902 if (retval != ERROR_OK) {
2903 LOG_DEBUG("Examine %s failed", "TTYPR");
2904 return retval;
2905 }
2906
2907 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2908 armv7a->debug_base + CPUDBG_DIDR, &didr);
2909 if (retval != ERROR_OK) {
2910 LOG_DEBUG("Examine %s failed", "DIDR");
2911 return retval;
2912 }
2913
2914 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2915 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2916 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2917 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2918
2919 cortex_a->cpuid = cpuid;
2920 cortex_a->ctypr = ctypr;
2921 cortex_a->ttypr = ttypr;
2922 cortex_a->didr = didr;
2923
2924 /* Unlocking the debug registers */
2925 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
2926 CORTEX_A15_PARTNUM) {
2927
2928 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2929 armv7a->debug_base + CPUDBG_OSLAR,
2930 0);
2931
2932 if (retval != ERROR_OK)
2933 return retval;
2934
2935 }
2936 /* Unlocking the debug registers */
2937 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
2938 CORTEX_A7_PARTNUM) {
2939
2940 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2941 armv7a->debug_base + CPUDBG_OSLAR,
2942 0);
2943
2944 if (retval != ERROR_OK)
2945 return retval;
2946
2947 }
2948 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2949 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2950
2951 if (retval != ERROR_OK)
2952 return retval;
2953
2954 LOG_DEBUG("target->coreid %d DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
2955
2956 armv7a->arm.core_type = ARM_MODE_MON;
2957 retval = cortex_a_dpm_setup(cortex_a, didr);
2958 if (retval != ERROR_OK)
2959 return retval;
2960
2961 /* Setup Breakpoint Register Pairs */
2962 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
2963 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2964 cortex_a->brp_num_available = cortex_a->brp_num;
2965 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
2966 /* cortex_a->brb_enabled = ????; */
2967 for (i = 0; i < cortex_a->brp_num; i++) {
2968 cortex_a->brp_list[i].used = 0;
2969 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
2970 cortex_a->brp_list[i].type = BRP_NORMAL;
2971 else
2972 cortex_a->brp_list[i].type = BRP_CONTEXT;
2973 cortex_a->brp_list[i].value = 0;
2974 cortex_a->brp_list[i].control = 0;
2975 cortex_a->brp_list[i].BRPn = i;
2976 }
2977
2978 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
2979
2980 target_set_examined(target);
2981 return ERROR_OK;
2982 }
2983
2984 static int cortex_a_examine(struct target *target)
2985 {
2986 int retval = ERROR_OK;
2987
2988 /* don't re-probe hardware after each reset */
2989 if (!target_was_examined(target))
2990 retval = cortex_a_examine_first(target);
2991
2992 /* Configure core debug access */
2993 if (retval == ERROR_OK)
2994 retval = cortex_a_init_debug_access(target);
2995
2996 return retval;
2997 }
2998
2999 /*
3000 * Cortex-A target creation and initialization
3001 */
3002
static int cortex_a_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* Intentionally empty: the hardware-dependent setup for this target
	 * is performed later, once the DAP is reachable. */
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
3009
3010 static int cortex_a_init_arch_info(struct target *target,
3011 struct cortex_a_common *cortex_a, struct jtag_tap *tap)
3012 {
3013 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3014 struct adiv5_dap *dap = &armv7a->dap;
3015
3016 armv7a->arm.dap = dap;
3017
3018 /* Setup struct cortex_a_common */
3019 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3020 /* tap has no dap initialized */
3021 if (!tap->dap) {
3022 armv7a->arm.dap = dap;
3023 /* Setup struct cortex_a_common */
3024
3025 /* prepare JTAG information for the new target */
3026 cortex_a->jtag_info.tap = tap;
3027 cortex_a->jtag_info.scann_size = 4;
3028
3029 /* Leave (only) generic DAP stuff for debugport_init() */
3030 dap->jtag_info = &cortex_a->jtag_info;
3031
3032 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
3033 dap->tar_autoincr_block = (1 << 10);
3034 dap->memaccess_tck = 80;
3035 tap->dap = dap;
3036 } else
3037 armv7a->arm.dap = tap->dap;
3038
3039 cortex_a->fast_reg_read = 0;
3040
3041 /* register arch-specific functions */
3042 armv7a->examine_debug_reason = NULL;
3043
3044 armv7a->post_debug_entry = cortex_a_post_debug_entry;
3045
3046 armv7a->pre_restore_context = NULL;
3047
3048 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3049
3050
3051 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
3052
3053 /* REVISIT v7a setup should be in a v7a-specific routine */
3054 armv7a_init_arch_info(target, armv7a);
3055 target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
3056
3057 return ERROR_OK;
3058 }
3059
3060 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3061 {
3062 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3063
3064 cortex_a->armv7a_common.is_armv7r = false;
3065
3066 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3067 }
3068
3069 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3070 {
3071 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3072
3073 cortex_a->armv7a_common.is_armv7r = true;
3074
3075 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3076 }
3077
3078
3079 static int cortex_a_mmu(struct target *target, int *enabled)
3080 {
3081 if (target->state != TARGET_HALTED) {
3082 LOG_ERROR("%s: target not halted", __func__);
3083 return ERROR_TARGET_INVALID;
3084 }
3085
3086 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3087 return ERROR_OK;
3088 }
3089
3090 static int cortex_a_virt2phys(struct target *target,
3091 uint32_t virt, uint32_t *phys)
3092 {
3093 int retval = ERROR_FAIL;
3094 struct armv7a_common *armv7a = target_to_armv7a(target);
3095 struct adiv5_dap *swjdp = armv7a->arm.dap;
3096 uint8_t apsel = swjdp->apsel;
3097 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
3098 uint32_t ret;
3099 retval = armv7a_mmu_translate_va(target,
3100 virt, &ret);
3101 if (retval != ERROR_OK)
3102 goto done;
3103 *phys = ret;
3104 } else {/* use this method if armv7a->memory_ap not selected
3105 * mmu must be enable in order to get a correct translation */
3106 retval = cortex_a_mmu_modify(target, 1);
3107 if (retval != ERROR_OK)
3108 goto done;
3109 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
3110 }
3111 done:
3112 return retval;
3113 }
3114
3115 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3116 {
3117 struct target *target = get_current_target(CMD_CTX);
3118 struct armv7a_common *armv7a = target_to_armv7a(target);
3119
3120 return armv7a_handle_cache_info_command(CMD_CTX,
3121 &armv7a->armv7a_mmu.armv7a_cache);
3122 }
3123
3124
3125 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3126 {
3127 struct target *target = get_current_target(CMD_CTX);
3128 if (!target_was_examined(target)) {
3129 LOG_ERROR("target not examined yet");
3130 return ERROR_FAIL;
3131 }
3132
3133 return cortex_a_init_debug_access(target);
3134 }
3135 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
3136 {
3137 struct target *target = get_current_target(CMD_CTX);
3138 /* check target is an smp target */
3139 struct target_list *head;
3140 struct target *curr;
3141 head = target->head;
3142 target->smp = 0;
3143 if (head != (struct target_list *)NULL) {
3144 while (head != (struct target_list *)NULL) {
3145 curr = head->target;
3146 curr->smp = 0;
3147 head = head->next;
3148 }
3149 /* fixes the target display to the debugger */
3150 target->gdb_service->target = target;
3151 }
3152 return ERROR_OK;
3153 }
3154
3155 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
3156 {
3157 struct target *target = get_current_target(CMD_CTX);
3158 struct target_list *head;
3159 struct target *curr;
3160 head = target->head;
3161 if (head != (struct target_list *)NULL) {
3162 target->smp = 1;
3163 while (head != (struct target_list *)NULL) {
3164 curr = head->target;
3165 curr->smp = 1;
3166 head = head->next;
3167 }
3168 }
3169 return ERROR_OK;
3170 }
3171
/* "cortex_a smp_gdb": with an argument, select which core gdb is steered
 * to next; always prints the current -> next core mapping. */
COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
{
	struct target *target = get_current_target(CMD_CTX);
	int retval = ERROR_OK;
	struct target_list *head;
	head = target->head;
	/* only meaningful for targets that are part of an SMP group */
	if (head != (struct target_list *)NULL) {
		if (CMD_ARGC == 1) {
			int coreid = 0;
			/* COMMAND_PARSE_NUMBER bails out of the handler itself
			 * on parse failure, so the retval check below appears
			 * redundant -- NOTE(review): confirm against the macro
			 * definition before removing it. */
			COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
			if (ERROR_OK != retval)
				return retval;
			target->gdb_service->core[1] = coreid;

		}
		command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
			, target->gdb_service->core[1]);
	}
	return ERROR_OK;
}
3192
/* Subcommands of the "cortex_a" group: cache inspection, debug
 * (re)initialization, and SMP control -- see the COMMAND_HANDLERs above. */
static const struct command_registration cortex_a_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{   .name = "smp_off",
	    .handler = cortex_a_handle_smp_off_command,
	    .mode = COMMAND_EXEC,
	    .help = "Stop smp handling",
	    .usage = "",},
	{
		.name = "smp_on",
		.handler = cortex_a_handle_smp_on_command,
		.mode = COMMAND_EXEC,
		.help = "Restart smp handling",
		.usage = "",
	},
	{
		.name = "smp_gdb",
		.handler = cortex_a_handle_smp_gdb_command,
		.mode = COMMAND_EXEC,
		.help = "display/fix current core played to gdb",
		.usage = "",
	},


	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for Cortex-A: chains the generic ARM and
 * ARMv7-A command groups plus the "cortex_a" subcommands. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3247
/* Target operations vtable for ARMv7-A (Cortex-A) cores. */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* watchpoints not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,

	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
3290
/* Subcommands of the "cortex_r4" group; shares the Cortex-A handlers but
 * omits the SMP commands. */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for Cortex-R4: chains the generic ARM and
 * ARMv7-A command groups plus the "cortex_r4" subcommands. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3325
/* Target operations vtable for ARMv7-R (Cortex-R4) cores.  Reuses the
 * Cortex-A implementation; note no physical-memory or MMU hooks are set
 * (the R profile has an MPU, not an MMU). */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* watchpoints not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will allow linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)