semihosting: support fileio operation
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex-R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 * *
39 * Cortex-A8(tm) TRM, ARM DDI 0344H *
40 * Cortex-A9(tm) TRM, ARM DDI 0407F *
41 * Cortex-R4(tm) TRM, ARM DDI 0363E *
42 * Cortex-A15(tm)TRM, ARM DDI 0438C *
43 * *
44 ***************************************************************************/
45
46 #ifdef HAVE_CONFIG_H
47 #include "config.h"
48 #endif
49
50 #include "breakpoints.h"
51 #include "cortex_a.h"
52 #include "register.h"
53 #include "target_request.h"
54 #include "target_type.h"
55 #include "arm_opcodes.h"
56 #include "arm_semihosting.h"
57 #include <helper/time_support.h>
58
/* Forward declarations for routines referenced before their definitions
 * later in this file. */
static int cortex_a_poll(struct target *target);
static int cortex_a_debug_entry(struct target *target);
static int cortex_a_restore_context(struct target *target, bool bpwp);
static int cortex_a_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int cortex_a_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int cortex_a_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum);
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum);
static int cortex_a_mmu(struct target *target, int *enabled);
static int cortex_a_mmu_modify(struct target *target, int enable);
static int cortex_a_virt2phys(struct target *target,
	uint32_t virt, uint32_t *phys);
static int cortex_a_read_cpu_memory(struct target *target,
	uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
80
81
82 /* restore cp15_control_reg at resume */
83 static int cortex_a_restore_cp15_control_reg(struct target *target)
84 {
85 int retval = ERROR_OK;
86 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
87 struct armv7a_common *armv7a = target_to_armv7a(target);
88
89 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
90 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
91 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
92 retval = armv7a->arm.mcr(target, 15,
93 0, 0, /* op1, op2 */
94 1, 0, /* CRn, CRm */
95 cortex_a->cp15_control_reg);
96 }
97 return retval;
98 }
99
100 /*
101 * Set up ARM core for memory access.
102 * If !phys_access, switch to SVC mode and make sure MMU is on
103 * If phys_access, switch off mmu
104 */
105 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
106 {
107 struct armv7a_common *armv7a = target_to_armv7a(target);
108 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
109 int mmu_enabled = 0;
110
111 if (phys_access == 0) {
112 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
113 cortex_a_mmu(target, &mmu_enabled);
114 if (mmu_enabled)
115 cortex_a_mmu_modify(target, 1);
116 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
117 /* overwrite DACR to all-manager */
118 armv7a->arm.mcr(target, 15,
119 0, 0, 3, 0,
120 0xFFFFFFFF);
121 }
122 } else {
123 cortex_a_mmu(target, &mmu_enabled);
124 if (mmu_enabled)
125 cortex_a_mmu_modify(target, 0);
126 }
127 return ERROR_OK;
128 }
129
130 /*
131 * Restore ARM core after memory access.
132 * If !phys_access, switch to previous mode
133 * If phys_access, restore MMU setting
134 */
135 static int cortex_a_post_memaccess(struct target *target, int phys_access)
136 {
137 struct armv7a_common *armv7a = target_to_armv7a(target);
138 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
139
140 if (phys_access == 0) {
141 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
142 /* restore */
143 armv7a->arm.mcr(target, 15,
144 0, 0, 3, 0,
145 cortex_a->cp15_dacr_reg);
146 }
147 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
148 } else {
149 int mmu_enabled = 0;
150 cortex_a_mmu(target, &mmu_enabled);
151 if (mmu_enabled)
152 cortex_a_mmu_modify(target, 1);
153 }
154 return ERROR_OK;
155 }
156
157
158 /* modify cp15_control_reg in order to enable or disable mmu for :
159 * - virt2phys address conversion
160 * - read or write memory in phys or virt address */
161 static int cortex_a_mmu_modify(struct target *target, int enable)
162 {
163 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
164 struct armv7a_common *armv7a = target_to_armv7a(target);
165 int retval = ERROR_OK;
166 int need_write = 0;
167
168 if (enable) {
169 /* if mmu enabled at target stop and mmu not enable */
170 if (!(cortex_a->cp15_control_reg & 0x1U)) {
171 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
172 return ERROR_FAIL;
173 }
174 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
175 cortex_a->cp15_control_reg_curr |= 0x1U;
176 need_write = 1;
177 }
178 } else {
179 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
180 cortex_a->cp15_control_reg_curr &= ~0x1U;
181 need_write = 1;
182 }
183 }
184
185 if (need_write) {
186 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
187 enable ? "enable mmu" : "disable mmu",
188 cortex_a->cp15_control_reg_curr);
189
190 retval = armv7a->arm.mcr(target, 15,
191 0, 0, /* op1, op2 */
192 1, 0, /* CRn, CRm */
193 cortex_a->cp15_control_reg_curr);
194 }
195 return retval;
196 }
197
/*
 * Cortex-A Basic debug access, very low level assumes state is saved
 *
 * Unlocks the memory-mapped debug registers and configures debug-state
 * behavior (cache/TLB) so host accesses are safe, then polls once to
 * refresh the cached target state.
 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	/* lock memory-mapped access to debug registers to prevent
	 * software interference */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable cacheline fills and force cache write-through in debug state */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable TLB lookup and refill/eviction in debug state */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSMCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a_poll(target);
}
232
233 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
234 {
235 /* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
236 * Writes final value of DSCR into *dscr. Pass force to force always
237 * reading DSCR at least once. */
238 struct armv7a_common *armv7a = target_to_armv7a(target);
239 int64_t then = timeval_ms();
240 while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
241 force = false;
242 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
243 armv7a->debug_base + CPUDBG_DSCR, dscr);
244 if (retval != ERROR_OK) {
245 LOG_ERROR("Could not read DSCR register");
246 return retval;
247 }
248 if (timeval_ms() > then + 1000) {
249 LOG_ERROR("Timeout waiting for InstrCompl=1");
250 return ERROR_FAIL;
251 }
252 }
253 return ERROR_OK;
254 }
255
256 /* To reduce needless round-trips, pass in a pointer to the current
257 * DSCR value. Initialize it to zero if you just need to know the
258 * value on return from this function; or DSCR_INSTR_COMP if you
259 * happen to know that no instruction is pending.
260 */
261 static int cortex_a_exec_opcode(struct target *target,
262 uint32_t opcode, uint32_t *dscr_p)
263 {
264 uint32_t dscr;
265 int retval;
266 struct armv7a_common *armv7a = target_to_armv7a(target);
267
268 dscr = dscr_p ? *dscr_p : 0;
269
270 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
271
272 /* Wait for InstrCompl bit to be set */
273 retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
274 if (retval != ERROR_OK)
275 return retval;
276
277 retval = mem_ap_write_u32(armv7a->debug_ap,
278 armv7a->debug_base + CPUDBG_ITR, opcode);
279 if (retval != ERROR_OK)
280 return retval;
281
282 int64_t then = timeval_ms();
283 do {
284 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
285 armv7a->debug_base + CPUDBG_DSCR, &dscr);
286 if (retval != ERROR_OK) {
287 LOG_ERROR("Could not read DSCR register");
288 return retval;
289 }
290 if (timeval_ms() > then + 1000) {
291 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
292 return ERROR_FAIL;
293 }
294 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
295
296 if (dscr_p)
297 *dscr_p = dscr;
298
299 return retval;
300 }
301
/**************************************************************************
Read core register with very few exec_opcode, fast but needs work_area.
This can cause problems with MMU active.
**************************************************************************/
static int cortex_a_read_regs_through_mem(struct target *target, uint32_t address,
	uint32_t *regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* r0 is fetched individually over the DCC into regfile[0]... */
	retval = cortex_a_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	/* ...then r0 is loaded with the work-area address... */
	retval = cortex_a_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	/* ...and "STMIA r0, {r1-r15}" (register mask 0xFFFE) dumps the
	 * remaining 15 registers into the work area in one instruction. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;

	/* Read the 15 stored words (r1..r15) back through the memory AP. */
	retval = mem_ap_read_buf(armv7a->memory_ap,
			(uint8_t *)(&regfile[1]), 4, 15, address);

	return retval;
}
327
/* Read one core register (r0-r15, CPSR=16, SPSR=17) by executing
 * instructions that push the value into the DCC, then draining DTRTX.
 * NOTE(review): regnum > 17 returns ERROR_OK without writing *value —
 * callers must pass a valid register number. */
static int cortex_a_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	if (reg > 17)
		return retval;

	if (reg < 15) {
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else if (reg == 15) {
		/* "MOV r0, r15"; then move r0 to DCCTX */
		retval = cortex_a_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* "MRS r0, CPSR" or "MRS r0, SPSR"
		 * then move r0 to DCCTX
		 * (reg & 1: 16 -> CPSR, 17 -> SPSR)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX */
	int64_t then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
389
/* Write one core register (r0-r15, CPSR=16, SPSR=17) by loading the value
 * into DTRRX and executing instructions that pull it from the DCC.
 * NOTE(review): regnum > 17 returns ERROR_OK without doing the write —
 * callers must pass a valid register number. */
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15) {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	} else if (Rd == 15) {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16) {
			retval = cortex_a_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
467
468 /* Write to memory mapped registers directly with no cache or mmu handling */
469 static int cortex_a_dap_write_memap_register_u32(struct target *target,
470 uint32_t address,
471 uint32_t value)
472 {
473 int retval;
474 struct armv7a_common *armv7a = target_to_armv7a(target);
475
476 retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
477
478 return retval;
479 }
480
481 /*
482 * Cortex-A implementation of Debug Programmer's Model
483 *
484 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
485 * so there's no need to poll for it before executing an instruction.
486 *
487 * NOTE that in several of these cases the "stall" mode might be useful.
488 * It'd let us queue a few operations together... prepare/finish might
489 * be the places to enable/disable that mode.
490 */
491
492 static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
493 {
494 return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
495 }
496
497 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
498 {
499 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
500 return mem_ap_write_u32(a->armv7a_common.debug_ap,
501 a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
502 }
503
504 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
505 uint32_t *dscr_p)
506 {
507 uint32_t dscr = DSCR_INSTR_COMP;
508 int retval;
509
510 if (dscr_p)
511 dscr = *dscr_p;
512
513 /* Wait for DTRRXfull */
514 int64_t then = timeval_ms();
515 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
516 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
517 a->armv7a_common.debug_base + CPUDBG_DSCR,
518 &dscr);
519 if (retval != ERROR_OK)
520 return retval;
521 if (timeval_ms() > then + 1000) {
522 LOG_ERROR("Timeout waiting for read dcc");
523 return ERROR_FAIL;
524 }
525 }
526
527 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
528 a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
529 if (retval != ERROR_OK)
530 return retval;
531 /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
532
533 if (dscr_p)
534 *dscr_p = dscr;
535
536 return retval;
537 }
538
/* DPM "prepare" hook: establish the invariant that DSCR.InstrCompl is
 * set before any DPM operation runs, and make sure the DCC receive
 * register is empty. */
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by draining it into r0:
		 * "MRC p14, 0, r0, c0, c5, 0" */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
575
576 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
577 {
578 /* REVISIT what could be done here? */
579 return ERROR_OK;
580 }
581
582 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
583 uint32_t opcode, uint32_t data)
584 {
585 struct cortex_a_common *a = dpm_to_a(dpm);
586 int retval;
587 uint32_t dscr = DSCR_INSTR_COMP;
588
589 retval = cortex_a_write_dcc(a, data);
590 if (retval != ERROR_OK)
591 return retval;
592
593 return cortex_a_exec_opcode(
594 a->armv7a_common.arm.target,
595 opcode,
596 &dscr);
597 }
598
/* Load 'data' into r0 via the DCC, then run 'opcode', which is expected
 * to take its operand from r0. */
static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);

	return retval;
}
626
627 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
628 {
629 struct target *target = dpm->arm->target;
630 uint32_t dscr = DSCR_INSTR_COMP;
631
632 /* "Prefetch flush" after modifying execution status in CPSR */
633 return cortex_a_exec_opcode(target,
634 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
635 &dscr);
636 }
637
638 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
639 uint32_t opcode, uint32_t *data)
640 {
641 struct cortex_a_common *a = dpm_to_a(dpm);
642 int retval;
643 uint32_t dscr = DSCR_INSTR_COMP;
644
645 /* the opcode, writing data to DCC */
646 retval = cortex_a_exec_opcode(
647 a->armv7a_common.arm.target,
648 opcode,
649 &dscr);
650 if (retval != ERROR_OK)
651 return retval;
652
653 return cortex_a_read_dcc(a, data, &dscr);
654 }
655
656
657 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
658 uint32_t opcode, uint32_t *data)
659 {
660 struct cortex_a_common *a = dpm_to_a(dpm);
661 uint32_t dscr = DSCR_INSTR_COMP;
662 int retval;
663
664 /* the opcode, writing data to R0 */
665 retval = cortex_a_exec_opcode(
666 a->armv7a_common.arm.target,
667 opcode,
668 &dscr);
669 if (retval != ERROR_OK)
670 return retval;
671
672 /* write R0 to DCC */
673 retval = cortex_a_exec_opcode(
674 a->armv7a_common.arm.target,
675 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
676 &dscr);
677 if (retval != ERROR_OK)
678 return retval;
679
680 return cortex_a_read_dcc(a, data, &dscr);
681 }
682
683 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
684 uint32_t addr, uint32_t control)
685 {
686 struct cortex_a_common *a = dpm_to_a(dpm);
687 uint32_t vr = a->armv7a_common.debug_base;
688 uint32_t cr = a->armv7a_common.debug_base;
689 int retval;
690
691 switch (index_t) {
692 case 0 ... 15: /* breakpoints */
693 vr += CPUDBG_BVR_BASE;
694 cr += CPUDBG_BCR_BASE;
695 break;
696 case 16 ... 31: /* watchpoints */
697 vr += CPUDBG_WVR_BASE;
698 cr += CPUDBG_WCR_BASE;
699 index_t -= 16;
700 break;
701 default:
702 return ERROR_FAIL;
703 }
704 vr += 4 * index_t;
705 cr += 4 * index_t;
706
707 LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
708 (unsigned) vr, (unsigned) cr);
709
710 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
711 vr, addr);
712 if (retval != ERROR_OK)
713 return retval;
714 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
715 cr, control);
716 return retval;
717 }
718
719 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
720 {
721 struct cortex_a_common *a = dpm_to_a(dpm);
722 uint32_t cr;
723
724 switch (index_t) {
725 case 0 ... 15:
726 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
727 break;
728 case 16 ... 31:
729 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
730 index_t -= 16;
731 break;
732 default:
733 return ERROR_FAIL;
734 }
735 cr += 4 * index_t;
736
737 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
738
739 /* clear control register */
740 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
741 }
742
/* Wire the Cortex-A implementations into the generic ARM DPM vtable and
 * run the shared DPM setup/initialization. */
static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
{
	struct arm_dpm *dpm = &a->armv7a_common.dpm;
	int retval;

	dpm->arm = &a->armv7a_common.arm;
	dpm->didr = didr;

	dpm->prepare = cortex_a_dpm_prepare;
	dpm->finish = cortex_a_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a_bpwp_enable;
	dpm->bpwp_disable = cortex_a_bpwp_disable;

	/* Only initialize when setup succeeded. */
	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
770 static struct target *get_cortex_a(struct target *target, int32_t coreid)
771 {
772 struct target_list *head;
773 struct target *curr;
774
775 head = target->head;
776 while (head != (struct target_list *)NULL) {
777 curr = head->target;
778 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
779 return curr;
780 head = head->next;
781 }
782 return target;
783 }
784 static int cortex_a_halt(struct target *target);
785
786 static int cortex_a_halt_smp(struct target *target)
787 {
788 int retval = 0;
789 struct target_list *head;
790 struct target *curr;
791 head = target->head;
792 while (head != (struct target_list *)NULL) {
793 curr = head->target;
794 if ((curr != target) && (curr->state != TARGET_HALTED)
795 && target_was_examined(curr))
796 retval += cortex_a_halt(curr);
797 head = head->next;
798 }
799 return retval;
800 }
801
802 static int update_halt_gdb(struct target *target)
803 {
804 int retval = 0;
805 if (target->gdb_service && target->gdb_service->core[0] == -1) {
806 target->gdb_service->target = target;
807 target->gdb_service->core[0] = target->coreid;
808 retval += cortex_a_halt_smp(target);
809 }
810 return retval;
811 }
812
/*
 * Cortex-A Run control
 */

/* Poll DSCR to track the target's run state, entering debug state and
 * notifying gdb (and, for SMP, the sibling cores) on a new halt. */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				/* If this halt was a semihosting request,
				 * it is handled (and possibly resumed)
				 * without reporting a halt to gdb. */
				if (arm_semihosting(target, &retval) != 0)
					return retval;

				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
890
/* Request a halt via DRCR, enable halting debug mode in DSCR, and wait
 * (up to one second) for DSCR to report the core halted. */
static int cortex_a_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enter halting debug mode
	 */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	/* Poll DSCR until the core reports halted (1 s timeout). */
	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_HALTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
937
/* Prepare the core for resuming: fix up the PC for the requested resume
 * address and core state, restore CP15 control and the register context,
 * and invalidate the register cache.  Does NOT actually restart the core
 * (see cortex_a_internal_restart).
 * current = 1: resume at the current PC (written back through *address);
 * otherwise resume at *address. */
static int cortex_a_internal_restore(struct target *target, int current,
	uint32_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM state: PC must be word-aligned */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
1032
/* Actually restart a core previously prepared by
 * cortex_a_internal_restore(): clear ITRen, issue the restart via DRCR,
 * and wait (up to one second) for DSCR to confirm the core is running. */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Leaving debug with an instruction pending is a protocol error;
	 * warn but proceed. */
	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Poll DSCR until the core reports restarted (1 s timeout). */
	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1088
1089 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
1090 {
1091 int retval = 0;
1092 struct target_list *head;
1093 struct target *curr;
1094 uint32_t address;
1095 head = target->head;
1096 while (head != (struct target_list *)NULL) {
1097 curr = head->target;
1098 if ((curr != target) && (curr->state != TARGET_RUNNING)
1099 && target_was_examined(curr)) {
1100 /* resume current address , not in step mode */
1101 retval += cortex_a_internal_restore(curr, 1, &address,
1102 handle_breakpoints, 0);
1103 retval += cortex_a_internal_restart(curr);
1104 }
1105 head = head->next;
1106
1107 }
1108 return retval;
1109 }
1110
1111 static int cortex_a_resume(struct target *target, int current,
1112 uint32_t address, int handle_breakpoints, int debug_execution)
1113 {
1114 int retval = 0;
1115 /* dummy resume for smp toggle in order to reduce gdb impact */
1116 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1117 /* simulate a start and halt of target */
1118 target->gdb_service->target = NULL;
1119 target->gdb_service->core[0] = target->gdb_service->core[1];
1120 /* fake resume at next poll we play the target core[1], see poll*/
1121 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1122 return 0;
1123 }
1124 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1125 if (target->smp) {
1126 target->gdb_service->core[0] = -1;
1127 retval = cortex_a_restore_smp(target, handle_breakpoints);
1128 if (retval != ERROR_OK)
1129 return retval;
1130 }
1131 cortex_a_internal_restart(target);
1132
1133 if (!debug_execution) {
1134 target->state = TARGET_RUNNING;
1135 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1136 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1137 } else {
1138 target->state = TARGET_DEBUG_RUNNING;
1139 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1140 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1141 }
1142
1143 return ERROR_OK;
1144 }
1145
/* Called when the core has entered debug state: re-enable the ITR,
 * determine the debug reason (and WFAR for watchpoints), read the core
 * registers plus CPSR/SPSR into the register cache, fix up the PC resume
 * address, and run the target-specific post_debug_entry hook. */
static int cortex_a_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, spsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load registers accessible through the core debug port.
	 * Without a working area they are read one at a time via the DPM;
	 * with one, they are dumped in a burst through target memory. */
	if (!regfile_working_area)
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	else {
		retval = cortex_a_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
			return retval;

		/* read Current PSR */
		retval = cortex_a_dap_read_coreregister_u32(target, &cpsr, 16);
		/* store current cpsr */
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(arm, cpsr);

		/* update cache: mark r0..pc valid and clean */
		for (i = 0; i <= ARM_PC; i++) {
			reg = arm_reg_current(arm, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: the sampled PC is ahead of the
		 * entry point by 4 (Thumb/ThumbEE) or 8 (ARM) bytes. */
		if (cpsr & (1 << 5)) {
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		} else {
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = arm->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

	/* read Saved PSR */
	retval = cortex_a_dap_read_coreregister_u32(target, &spsr, 17);
	/* store current spsr */
	if (retval != ERROR_OK)
		return retval;

	reg = arm->spsr;
	buf_set_u32(reg->value, 0, 32, spsr);
	reg->valid = 1;
	reg->dirty = 0;

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1277
1278 static int cortex_a_post_debug_entry(struct target *target)
1279 {
1280 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1281 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1282 int retval;
1283
1284 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1285 retval = armv7a->arm.mrc(target, 15,
1286 0, 0, /* op1, op2 */
1287 1, 0, /* CRn, CRm */
1288 &cortex_a->cp15_control_reg);
1289 if (retval != ERROR_OK)
1290 return retval;
1291 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1292 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1293
1294 if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1295 armv7a_identify_cache(target);
1296
1297 if (armv7a->is_armv7r) {
1298 armv7a->armv7a_mmu.mmu_enabled = 0;
1299 } else {
1300 armv7a->armv7a_mmu.mmu_enabled =
1301 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1302 }
1303 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1304 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1305 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1306 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1307 cortex_a->curr_mode = armv7a->arm.core_mode;
1308
1309 /* switch to SVC mode to read DACR */
1310 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1311 armv7a->arm.mrc(target, 15,
1312 0, 0, 3, 0,
1313 &cortex_a->cp15_dacr_reg);
1314
1315 LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1316 cortex_a->cp15_dacr_reg);
1317
1318 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1319 return ERROR_OK;
1320 }
1321
1322 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1323 {
1324 struct armv7a_common *armv7a = target_to_armv7a(target);
1325 uint32_t dscr;
1326
1327 /* Read DSCR */
1328 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1329 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1330 if (ERROR_OK != retval)
1331 return retval;
1332
1333 /* clear bitfield */
1334 dscr &= ~bit_mask;
1335 /* put new value */
1336 dscr |= value & bit_mask;
1337
1338 /* write new DSCR */
1339 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1340 armv7a->debug_base + CPUDBG_DSCR, dscr);
1341 return retval;
1342 }
1343
1344 static int cortex_a_step(struct target *target, int current, uint32_t address,
1345 int handle_breakpoints)
1346 {
1347 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1348 struct armv7a_common *armv7a = target_to_armv7a(target);
1349 struct arm *arm = &armv7a->arm;
1350 struct breakpoint *breakpoint = NULL;
1351 struct breakpoint stepbreakpoint;
1352 struct reg *r;
1353 int retval;
1354
1355 if (target->state != TARGET_HALTED) {
1356 LOG_WARNING("target not halted");
1357 return ERROR_TARGET_NOT_HALTED;
1358 }
1359
1360 /* current = 1: continue on current pc, otherwise continue at <address> */
1361 r = arm->pc;
1362 if (!current)
1363 buf_set_u32(r->value, 0, 32, address);
1364 else
1365 address = buf_get_u32(r->value, 0, 32);
1366
1367 /* The front-end may request us not to handle breakpoints.
1368 * But since Cortex-A uses breakpoint for single step,
1369 * we MUST handle breakpoints.
1370 */
1371 handle_breakpoints = 1;
1372 if (handle_breakpoints) {
1373 breakpoint = breakpoint_find(target, address);
1374 if (breakpoint)
1375 cortex_a_unset_breakpoint(target, breakpoint);
1376 }
1377
1378 /* Setup single step breakpoint */
1379 stepbreakpoint.address = address;
1380 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1381 ? 2 : 4;
1382 stepbreakpoint.type = BKPT_HARD;
1383 stepbreakpoint.set = 0;
1384
1385 /* Disable interrupts during single step if requested */
1386 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1387 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1388 if (ERROR_OK != retval)
1389 return retval;
1390 }
1391
1392 /* Break on IVA mismatch */
1393 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1394
1395 target->debug_reason = DBG_REASON_SINGLESTEP;
1396
1397 retval = cortex_a_resume(target, 1, address, 0, 0);
1398 if (retval != ERROR_OK)
1399 return retval;
1400
1401 int64_t then = timeval_ms();
1402 while (target->state != TARGET_HALTED) {
1403 retval = cortex_a_poll(target);
1404 if (retval != ERROR_OK)
1405 return retval;
1406 if (timeval_ms() > then + 1000) {
1407 LOG_ERROR("timeout waiting for target halt");
1408 return ERROR_FAIL;
1409 }
1410 }
1411
1412 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1413
1414 /* Re-enable interrupts if they were disabled */
1415 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1416 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1417 if (ERROR_OK != retval)
1418 return retval;
1419 }
1420
1421
1422 target->debug_reason = DBG_REASON_BREAKPOINT;
1423
1424 if (breakpoint)
1425 cortex_a_set_breakpoint(target, breakpoint, 0);
1426
1427 if (target->state != TARGET_HALTED)
1428 LOG_DEBUG("target stepped");
1429
1430 return ERROR_OK;
1431 }
1432
1433 static int cortex_a_restore_context(struct target *target, bool bpwp)
1434 {
1435 struct armv7a_common *armv7a = target_to_armv7a(target);
1436
1437 LOG_DEBUG(" ");
1438
1439 if (armv7a->pre_restore_context)
1440 armv7a->pre_restore_context(target);
1441
1442 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1443 }
1444
1445 /*
1446 * Cortex-A Breakpoint and watchpoint functions
1447 */
1448
1449 /* Setup hardware Breakpoint Register Pair */
1450 static int cortex_a_set_breakpoint(struct target *target,
1451 struct breakpoint *breakpoint, uint8_t matchmode)
1452 {
1453 int retval;
1454 int brp_i = 0;
1455 uint32_t control;
1456 uint8_t byte_addr_select = 0x0F;
1457 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1458 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1459 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1460
1461 if (breakpoint->set) {
1462 LOG_WARNING("breakpoint already set");
1463 return ERROR_OK;
1464 }
1465
1466 if (breakpoint->type == BKPT_HARD) {
1467 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1468 brp_i++;
1469 if (brp_i >= cortex_a->brp_num) {
1470 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1471 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1472 }
1473 breakpoint->set = brp_i + 1;
1474 if (breakpoint->length == 2)
1475 byte_addr_select = (3 << (breakpoint->address & 0x02));
1476 control = ((matchmode & 0x7) << 20)
1477 | (byte_addr_select << 5)
1478 | (3 << 1) | 1;
1479 brp_list[brp_i].used = 1;
1480 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1481 brp_list[brp_i].control = control;
1482 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1483 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1484 brp_list[brp_i].value);
1485 if (retval != ERROR_OK)
1486 return retval;
1487 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1488 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1489 brp_list[brp_i].control);
1490 if (retval != ERROR_OK)
1491 return retval;
1492 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1493 brp_list[brp_i].control,
1494 brp_list[brp_i].value);
1495 } else if (breakpoint->type == BKPT_SOFT) {
1496 uint8_t code[4];
1497 if (breakpoint->length == 2)
1498 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1499 else
1500 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1501 retval = target_read_memory(target,
1502 breakpoint->address & 0xFFFFFFFE,
1503 breakpoint->length, 1,
1504 breakpoint->orig_instr);
1505 if (retval != ERROR_OK)
1506 return retval;
1507
1508 /* make sure data cache is cleaned & invalidated down to PoC */
1509 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1510 armv7a_cache_flush_virt(target, breakpoint->address,
1511 breakpoint->length);
1512 }
1513
1514 retval = target_write_memory(target,
1515 breakpoint->address & 0xFFFFFFFE,
1516 breakpoint->length, 1, code);
1517 if (retval != ERROR_OK)
1518 return retval;
1519
1520 /* update i-cache at breakpoint location */
1521 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1522 breakpoint->length);
1523 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1524 breakpoint->length);
1525
1526 breakpoint->set = 0x11; /* Any nice value but 0 */
1527 }
1528
1529 return ERROR_OK;
1530 }
1531
1532 static int cortex_a_set_context_breakpoint(struct target *target,
1533 struct breakpoint *breakpoint, uint8_t matchmode)
1534 {
1535 int retval = ERROR_FAIL;
1536 int brp_i = 0;
1537 uint32_t control;
1538 uint8_t byte_addr_select = 0x0F;
1539 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1540 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1541 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1542
1543 if (breakpoint->set) {
1544 LOG_WARNING("breakpoint already set");
1545 return retval;
1546 }
1547 /*check available context BRPs*/
1548 while ((brp_list[brp_i].used ||
1549 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1550 brp_i++;
1551
1552 if (brp_i >= cortex_a->brp_num) {
1553 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1554 return ERROR_FAIL;
1555 }
1556
1557 breakpoint->set = brp_i + 1;
1558 control = ((matchmode & 0x7) << 20)
1559 | (byte_addr_select << 5)
1560 | (3 << 1) | 1;
1561 brp_list[brp_i].used = 1;
1562 brp_list[brp_i].value = (breakpoint->asid);
1563 brp_list[brp_i].control = control;
1564 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1565 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1566 brp_list[brp_i].value);
1567 if (retval != ERROR_OK)
1568 return retval;
1569 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1570 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1571 brp_list[brp_i].control);
1572 if (retval != ERROR_OK)
1573 return retval;
1574 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1575 brp_list[brp_i].control,
1576 brp_list[brp_i].value);
1577 return ERROR_OK;
1578
1579 }
1580
1581 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1582 {
1583 int retval = ERROR_FAIL;
1584 int brp_1 = 0; /* holds the contextID pair */
1585 int brp_2 = 0; /* holds the IVA pair */
1586 uint32_t control_CTX, control_IVA;
1587 uint8_t CTX_byte_addr_select = 0x0F;
1588 uint8_t IVA_byte_addr_select = 0x0F;
1589 uint8_t CTX_machmode = 0x03;
1590 uint8_t IVA_machmode = 0x01;
1591 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1592 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1593 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1594
1595 if (breakpoint->set) {
1596 LOG_WARNING("breakpoint already set");
1597 return retval;
1598 }
1599 /*check available context BRPs*/
1600 while ((brp_list[brp_1].used ||
1601 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1602 brp_1++;
1603
1604 printf("brp(CTX) found num: %d\n", brp_1);
1605 if (brp_1 >= cortex_a->brp_num) {
1606 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1607 return ERROR_FAIL;
1608 }
1609
1610 while ((brp_list[brp_2].used ||
1611 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1612 brp_2++;
1613
1614 printf("brp(IVA) found num: %d\n", brp_2);
1615 if (brp_2 >= cortex_a->brp_num) {
1616 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1617 return ERROR_FAIL;
1618 }
1619
1620 breakpoint->set = brp_1 + 1;
1621 breakpoint->linked_BRP = brp_2;
1622 control_CTX = ((CTX_machmode & 0x7) << 20)
1623 | (brp_2 << 16)
1624 | (0 << 14)
1625 | (CTX_byte_addr_select << 5)
1626 | (3 << 1) | 1;
1627 brp_list[brp_1].used = 1;
1628 brp_list[brp_1].value = (breakpoint->asid);
1629 brp_list[brp_1].control = control_CTX;
1630 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1631 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1632 brp_list[brp_1].value);
1633 if (retval != ERROR_OK)
1634 return retval;
1635 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1636 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1637 brp_list[brp_1].control);
1638 if (retval != ERROR_OK)
1639 return retval;
1640
1641 control_IVA = ((IVA_machmode & 0x7) << 20)
1642 | (brp_1 << 16)
1643 | (IVA_byte_addr_select << 5)
1644 | (3 << 1) | 1;
1645 brp_list[brp_2].used = 1;
1646 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1647 brp_list[brp_2].control = control_IVA;
1648 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1649 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1650 brp_list[brp_2].value);
1651 if (retval != ERROR_OK)
1652 return retval;
1653 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1654 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1655 brp_list[brp_2].control);
1656 if (retval != ERROR_OK)
1657 return retval;
1658
1659 return ERROR_OK;
1660 }
1661
/* Remove a breakpoint previously installed by a cortex_a_set_*_breakpoint
 * variant. Hardware breakpoints release their BRP(s) and clear the
 * BVR/BCR pair(s); software breakpoints restore the saved original
 * instruction and keep the D-/I-caches coherent. */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* A non-zero address AND asid marks a hybrid breakpoint,
		 * which occupies two linked BRPs: the context-ID pair
		 * (brp_i) and the IVA pair (brp_j). */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* Clear the context pair: bookkeeping first, then BCR/BVR. */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			/* Clear the linked IVA pair the same way. */
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* Plain hardware breakpoint: a single BRP to clear. */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
					breakpoint->length);
		}

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
					breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
					breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1777
1778 static int cortex_a_add_breakpoint(struct target *target,
1779 struct breakpoint *breakpoint)
1780 {
1781 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1782
1783 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1784 LOG_INFO("no hardware breakpoint available");
1785 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1786 }
1787
1788 if (breakpoint->type == BKPT_HARD)
1789 cortex_a->brp_num_available--;
1790
1791 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1792 }
1793
1794 static int cortex_a_add_context_breakpoint(struct target *target,
1795 struct breakpoint *breakpoint)
1796 {
1797 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1798
1799 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1800 LOG_INFO("no hardware breakpoint available");
1801 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1802 }
1803
1804 if (breakpoint->type == BKPT_HARD)
1805 cortex_a->brp_num_available--;
1806
1807 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1808 }
1809
1810 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1811 struct breakpoint *breakpoint)
1812 {
1813 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1814
1815 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1816 LOG_INFO("no hardware breakpoint available");
1817 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1818 }
1819
1820 if (breakpoint->type == BKPT_HARD)
1821 cortex_a->brp_num_available--;
1822
1823 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1824 }
1825
1826
1827 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1828 {
1829 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1830
1831 #if 0
1832 /* It is perfectly possible to remove breakpoints while the target is running */
1833 if (target->state != TARGET_HALTED) {
1834 LOG_WARNING("target not halted");
1835 return ERROR_TARGET_NOT_HALTED;
1836 }
1837 #endif
1838
1839 if (breakpoint->set) {
1840 cortex_a_unset_breakpoint(target, breakpoint);
1841 if (breakpoint->type == BKPT_HARD)
1842 cortex_a->brp_num_available++;
1843 }
1844
1845
1846 return ERROR_OK;
1847 }
1848
1849 /*
1850 * Cortex-A Reset functions
1851 */
1852
/* Assert reset on the target. Prefers a board-level RESET_ASSERT event
 * handler; otherwise pulses SRST when the adapter provides it. Marks the
 * register cache invalid and the target state as RESET. */
static int cortex_a_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* This function can be called in "target not examined" state */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		/* SRST is only asserted here for reset-halt, and only when
		 * it does not gate the JTAG clock (SRST_NO_GATING). */
		if (target->reset_halt)
			if (jtag_get_reset_config() & RESET_SRST_NO_GATING)
				jtag_add_reset(0, 1);
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target))
		register_cache_invalidate(armv7a->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1886
1887 static int cortex_a_deassert_reset(struct target *target)
1888 {
1889 int retval;
1890
1891 LOG_DEBUG(" ");
1892
1893 /* be certain SRST is off */
1894 jtag_add_reset(0, 0);
1895
1896 if (target_was_examined(target)) {
1897 retval = cortex_a_poll(target);
1898 if (retval != ERROR_OK)
1899 return retval;
1900 }
1901
1902 if (target->reset_halt) {
1903 if (target->state != TARGET_HALTED) {
1904 LOG_WARNING("%s: ran after reset and before halt ...",
1905 target_name(target));
1906 if (target_was_examined(target)) {
1907 retval = target_halt(target);
1908 if (retval != ERROR_OK)
1909 return retval;
1910 } else
1911 target->state = TARGET_UNKNOWN;
1912 }
1913 }
1914
1915 return ERROR_OK;
1916 }
1917
1918 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1919 {
1920 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1921 * New desired mode must be in mode. Current value of DSCR must be in
1922 * *dscr, which is updated with new value.
1923 *
1924 * This function elides actually sending the mode-change over the debug
1925 * interface if the mode is already set as desired.
1926 */
1927 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1928 if (new_dscr != *dscr) {
1929 struct armv7a_common *armv7a = target_to_armv7a(target);
1930 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1931 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1932 if (retval == ERROR_OK)
1933 *dscr = new_dscr;
1934 return retval;
1935 } else {
1936 return ERROR_OK;
1937 }
1938 }
1939
1940 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1941 uint32_t value, uint32_t *dscr)
1942 {
1943 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1944 struct armv7a_common *armv7a = target_to_armv7a(target);
1945 int64_t then = timeval_ms();
1946 int retval;
1947
1948 while ((*dscr & mask) != value) {
1949 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1950 armv7a->debug_base + CPUDBG_DSCR, dscr);
1951 if (retval != ERROR_OK)
1952 return retval;
1953 if (timeval_ms() > then + 1000) {
1954 LOG_ERROR("timeout waiting for DSCR bit change");
1955 return ERROR_FAIL;
1956 }
1957 }
1958 return ERROR_OK;
1959 }
1960
/* Read a coprocessor register: execute the caller-supplied MRC opcode
 * (which must target R0), transfer R0 to DTRTX, wait for TXfull, and
 * read the result out of DTRTX into *data. *dscr carries the cached
 * DSCR value across the calls. */
static int cortex_a_read_copro(struct target *target, uint32_t opcode,
	uint32_t *data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Move from coprocessor to R0. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
	 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
	 * must also check TXfull_l). Most of the time this will be free
	 * because TXfull_l will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
1994
1995 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
1996 uint32_t *dfsr, uint32_t *dscr)
1997 {
1998 int retval;
1999
2000 if (dfar) {
2001 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
2002 if (retval != ERROR_OK)
2003 return retval;
2004 }
2005
2006 if (dfsr) {
2007 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2008 if (retval != ERROR_OK)
2009 return retval;
2010 }
2011
2012 return ERROR_OK;
2013 }
2014
/* Write a coprocessor register: place data in DTRRX, pull it into R0 on
 * the core, then execute the caller-supplied MCR opcode (which must read
 * R0) and wait for the transfer to complete. *dscr carries the cached
 * DSCR value across the calls. */
static int cortex_a_write_copro(struct target *target, uint32_t opcode,
	uint32_t data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Write the value into DTRRX. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, data);
	if (retval != ERROR_OK)
		return retval;

	/* Move from DTRRX to R0. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to coprocessor. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2047
2048 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2049 uint32_t dfsr, uint32_t *dscr)
2050 {
2051 int retval;
2052
2053 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2054 if (retval != ERROR_OK)
2055 return retval;
2056
2057 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2058 if (retval != ERROR_OK)
2059 return retval;
2060
2061 return ERROR_OK;
2062 }
2063
2064 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2065 {
2066 uint32_t status, upper4;
2067
2068 if (dfsr & (1 << 9)) {
2069 /* LPAE format. */
2070 status = dfsr & 0x3f;
2071 upper4 = status >> 2;
2072 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2073 return ERROR_TARGET_TRANSLATION_FAULT;
2074 else if (status == 33)
2075 return ERROR_TARGET_UNALIGNED_ACCESS;
2076 else
2077 return ERROR_TARGET_DATA_ABORT;
2078 } else {
2079 /* Normal format. */
2080 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2081 if (status == 1)
2082 return ERROR_TARGET_UNALIGNED_ACCESS;
2083 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2084 status == 9 || status == 11 || status == 13 || status == 15)
2085 return ERROR_TARGET_TRANSLATION_FAULT;
2086 else
2087 return ERROR_TARGET_DATA_ABORT;
2088 }
2089 }
2090
static int cortex_a_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size size from *buffer. Old value of DSCR must
	 * be in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_write_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 * On a sticky abort this returns ERROR_OK; the caller inspects the abort
	 * flags left in *dscr to report the fault.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Write the value to store into DTRRX, honoring target endianness
		 * via target_buffer_get_u16/u32. */
		uint32_t data, opcode;
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* Transfer the value from DTRRX to R1. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Write the value transferred to R1 into memory.
		 * NOTE(review): the *_IP opcodes presumably store with
		 * post-increment of the address in R0, which is why R0 is never
		 * advanced here — verify against arm_opcodes.h. */
		if (size == 1)
			opcode = ARMV4_5_STRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_STRH_IP(1, 0);
		else
			opcode = ARMV4_5_STRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check RXfull_l). Most of the time this will be free
		 * because RXfull_l will be cleared immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2165
static int cortex_a_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size 4 from *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is fast but only works for
	 * word-sized objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 * Fault detection is left entirely to the caller, which checks the
	 * sticky abort flags in DSCR after the transfer.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	/* Switch to fast mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Latch STC instruction. In fast mode it is re-issued on every DTRRX
	 * write below, so one latched opcode drains the whole buffer. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
	if (retval != ERROR_OK)
		return retval;

	/* Transfer all the data and issue all the instructions. */
	return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
			4, count, armv7a->debug_base + CPUDBG_DTRRX);
}
2194
/* Write memory through the CPU (DCC + instruction stuffing), dispatching to
 * the fast word path or the slow byte/halfword path. Target must be halted.
 * On a data abort the DFAR/DFSR the target had before the transfer are
 * restored and the abort is translated into an OpenOCD error code. */
static int cortex_a_write_cpu_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* Write memory through the CPU. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
	}

	/* Cleanup below must run even on failure: record the first error, then
	 * restore DCC mode, drain the DCC, and translate/restore fault state. */
	out:
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. However, don't do this
	 * if there is fault, because then the instruction might not have completed
	 * successfully. */
	if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
		/* NOTE(review): returning here skips the DFAR/DFSR restore and DCC
		 * drain below — presumably acceptable because a wait timeout means
		 * the DCC state is unknown anyway, but verify. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2333
static int cortex_a_read_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size size into *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_read_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 * On a sticky abort this returns ERROR_OK; the caller inspects the abort
	 * flags left in *dscr to report the fault.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Issue a load of the appropriate size to R1.
		 * NOTE(review): the *_IP opcodes presumably load with
		 * post-increment of the address in R0, which is why R0 is never
		 * advanced here — verify against arm_opcodes.h. */
		uint32_t opcode, data;
		if (size == 1)
			opcode = ARMV4_5_LDRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_LDRH_IP(1, 0);
		else
			opcode = ARMV4_5_LDRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Issue a write of R1 to DTRTX. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check TXfull_l). Most of the time this will be free
		 * because TXfull_l will be set immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
				DSCR_DTRTX_FULL_LATCHED, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer, honoring
		 * target endianness via target_buffer_set_u16/u32. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &data);
		if (retval != ERROR_OK)
			return retval;
		if (size == 1)
			*buffer = (uint8_t) data;
		else if (size == 2)
			target_buffer_set_u16(target, buffer, (uint16_t) data);
		else
			target_buffer_set_u32(target, buffer, data);

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2409
static int cortex_a_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
	 * *dscr; updated to new value. This is fast but only works for word-sized
	 * objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 * The first load is issued in non-blocking mode and the last word is
	 * collected after leaving fast mode, so the fast-mode read-ahead never
	 * fetches past the requested range.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t u32;
	int retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Issue the LDC instruction via a write to ITR. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* One word is already in flight from the LDC above. */
	count--;

	if (count > 0) {
		/* Switch to fast mode if not already in that mode. */
		retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Latch LDC instruction. */
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. Due to fast
		 * mode rules, this blocks until the instruction finishes executing and
		 * then reissues the read instruction to read the next word from
		 * memory. The last read of DTRTX in this call reads the second-to-last
		 * word from memory and issues the read instruction for the last word.
		 */
		retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
				4, count, armv7a->debug_base + CPUDBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += count * 4;
	}

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, dscr, false);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Check for faults and return early. The caller inspects the abort
	 * flags left in *dscr to report the fault. */
	if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
		return ERROR_OK; /* A data fault is not considered a system failure. */

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check TXfull_l). Most of the time this will be free because TXfull_l
	 * will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX into the buffer. This is the last
	 * word. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &u32);
	if (retval != ERROR_OK)
		return retval;
	target_buffer_set_u32(target, buffer, u32);

	return ERROR_OK;
}
2496
/* Read memory through the CPU (DCC + instruction stuffing), dispatching to
 * the fast word path or the slow byte/halfword path. Target must be halted.
 * On a data abort the DFAR/DFSR the target had before the transfer are
 * restored and the abort is translated into an OpenOCD error code. */
static int cortex_a_read_cpu_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through the CPU. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
	}

	/* Cleanup below must run even on failure: record the first error, then
	 * restore DCC mode, drain the DCC, and translate/restore fault state. */
	out:
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2623
2624
2625 /*
2626 * Cortex-A Memory access
2627 *
2628 * This is same Cortex-M3 but we must also use the correct
2629 * ap number for every access.
2630 */
2631
2632 static int cortex_a_read_phys_memory(struct target *target,
2633 uint32_t address, uint32_t size,
2634 uint32_t count, uint8_t *buffer)
2635 {
2636 struct armv7a_common *armv7a = target_to_armv7a(target);
2637 struct adiv5_dap *swjdp = armv7a->arm.dap;
2638 uint8_t apsel = swjdp->apsel;
2639 int retval;
2640
2641 if (!count || !buffer)
2642 return ERROR_COMMAND_SYNTAX_ERROR;
2643
2644 LOG_DEBUG("Reading memory at real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32,
2645 address, size, count);
2646
2647 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
2648 return mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2649
2650 /* read memory through the CPU */
2651 cortex_a_prep_memaccess(target, 1);
2652 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2653 cortex_a_post_memaccess(target, 1);
2654
2655 return retval;
2656 }
2657
2658 static int cortex_a_read_memory(struct target *target, uint32_t address,
2659 uint32_t size, uint32_t count, uint8_t *buffer)
2660 {
2661 int retval;
2662
2663 /* cortex_a handles unaligned memory access */
2664 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2665 size, count);
2666
2667 cortex_a_prep_memaccess(target, 0);
2668 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2669 cortex_a_post_memaccess(target, 0);
2670
2671 return retval;
2672 }
2673
2674 static int cortex_a_read_memory_ahb(struct target *target, uint32_t address,
2675 uint32_t size, uint32_t count, uint8_t *buffer)
2676 {
2677 int mmu_enabled = 0;
2678 uint32_t virt, phys;
2679 int retval;
2680 struct armv7a_common *armv7a = target_to_armv7a(target);
2681 struct adiv5_dap *swjdp = armv7a->arm.dap;
2682 uint8_t apsel = swjdp->apsel;
2683
2684 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2685 return target_read_memory(target, address, size, count, buffer);
2686
2687 /* cortex_a handles unaligned memory access */
2688 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2689 size, count);
2690
2691 /* determine if MMU was enabled on target stop */
2692 if (!armv7a->is_armv7r) {
2693 retval = cortex_a_mmu(target, &mmu_enabled);
2694 if (retval != ERROR_OK)
2695 return retval;
2696 }
2697
2698 if (mmu_enabled) {
2699 virt = address;
2700 retval = cortex_a_virt2phys(target, virt, &phys);
2701 if (retval != ERROR_OK)
2702 return retval;
2703
2704 LOG_DEBUG("Reading at virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2705 virt, phys);
2706 address = phys;
2707 }
2708
2709 if (!count || !buffer)
2710 return ERROR_COMMAND_SYNTAX_ERROR;
2711
2712 retval = mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2713
2714 return retval;
2715 }
2716
2717 static int cortex_a_write_phys_memory(struct target *target,
2718 uint32_t address, uint32_t size,
2719 uint32_t count, const uint8_t *buffer)
2720 {
2721 struct armv7a_common *armv7a = target_to_armv7a(target);
2722 struct adiv5_dap *swjdp = armv7a->arm.dap;
2723 uint8_t apsel = swjdp->apsel;
2724 int retval;
2725
2726 if (!count || !buffer)
2727 return ERROR_COMMAND_SYNTAX_ERROR;
2728
2729 LOG_DEBUG("Writing memory to real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2730 size, count);
2731
2732 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
2733 return mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2734
2735 /* write memory through the CPU */
2736 cortex_a_prep_memaccess(target, 1);
2737 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2738 cortex_a_post_memaccess(target, 1);
2739
2740 return retval;
2741 }
2742
2743 static int cortex_a_write_memory(struct target *target, uint32_t address,
2744 uint32_t size, uint32_t count, const uint8_t *buffer)
2745 {
2746 int retval;
2747
2748 /* cortex_a handles unaligned memory access */
2749 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2750 size, count);
2751
2752 /* memory writes bypass the caches, must flush before writing */
2753 armv7a_cache_auto_flush_on_write(target, address, size * count);
2754
2755 cortex_a_prep_memaccess(target, 0);
2756 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2757 cortex_a_post_memaccess(target, 0);
2758 return retval;
2759 }
2760
2761 static int cortex_a_write_memory_ahb(struct target *target, uint32_t address,
2762 uint32_t size, uint32_t count, const uint8_t *buffer)
2763 {
2764 int mmu_enabled = 0;
2765 uint32_t virt, phys;
2766 int retval;
2767 struct armv7a_common *armv7a = target_to_armv7a(target);
2768 struct adiv5_dap *swjdp = armv7a->arm.dap;
2769 uint8_t apsel = swjdp->apsel;
2770
2771 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2772 return target_write_memory(target, address, size, count, buffer);
2773
2774 /* cortex_a handles unaligned memory access */
2775 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2776 size, count);
2777
2778 /* determine if MMU was enabled on target stop */
2779 if (!armv7a->is_armv7r) {
2780 retval = cortex_a_mmu(target, &mmu_enabled);
2781 if (retval != ERROR_OK)
2782 return retval;
2783 }
2784
2785 if (mmu_enabled) {
2786 virt = address;
2787 retval = cortex_a_virt2phys(target, virt, &phys);
2788 if (retval != ERROR_OK)
2789 return retval;
2790
2791 LOG_DEBUG("Writing to virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2792 virt,
2793 phys);
2794 address = phys;
2795 }
2796
2797 if (!count || !buffer)
2798 return ERROR_COMMAND_SYNTAX_ERROR;
2799
2800 retval = mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2801
2802 return retval;
2803 }
2804
2805 static int cortex_a_read_buffer(struct target *target, uint32_t address,
2806 uint32_t count, uint8_t *buffer)
2807 {
2808 uint32_t size;
2809
2810 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2811 * will have something to do with the size we leave to it. */
2812 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2813 if (address & size) {
2814 int retval = cortex_a_read_memory_ahb(target, address, size, 1, buffer);
2815 if (retval != ERROR_OK)
2816 return retval;
2817 address += size;
2818 count -= size;
2819 buffer += size;
2820 }
2821 }
2822
2823 /* Read the data with as large access size as possible. */
2824 for (; size > 0; size /= 2) {
2825 uint32_t aligned = count - count % size;
2826 if (aligned > 0) {
2827 int retval = cortex_a_read_memory_ahb(target, address, size, aligned / size, buffer);
2828 if (retval != ERROR_OK)
2829 return retval;
2830 address += aligned;
2831 count -= aligned;
2832 buffer += aligned;
2833 }
2834 }
2835
2836 return ERROR_OK;
2837 }
2838
2839 static int cortex_a_write_buffer(struct target *target, uint32_t address,
2840 uint32_t count, const uint8_t *buffer)
2841 {
2842 uint32_t size;
2843
2844 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2845 * will have something to do with the size we leave to it. */
2846 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2847 if (address & size) {
2848 int retval = cortex_a_write_memory_ahb(target, address, size, 1, buffer);
2849 if (retval != ERROR_OK)
2850 return retval;
2851 address += size;
2852 count -= size;
2853 buffer += size;
2854 }
2855 }
2856
2857 /* Write the data with as large access size as possible. */
2858 for (; size > 0; size /= 2) {
2859 uint32_t aligned = count - count % size;
2860 if (aligned > 0) {
2861 int retval = cortex_a_write_memory_ahb(target, address, size, aligned / size, buffer);
2862 if (retval != ERROR_OK)
2863 return retval;
2864 address += aligned;
2865 count -= aligned;
2866 buffer += aligned;
2867 }
2868 }
2869
2870 return ERROR_OK;
2871 }
2872
2873 static int cortex_a_handle_target_request(void *priv)
2874 {
2875 struct target *target = priv;
2876 struct armv7a_common *armv7a = target_to_armv7a(target);
2877 int retval;
2878
2879 if (!target_was_examined(target))
2880 return ERROR_OK;
2881 if (!target->dbg_msg_enabled)
2882 return ERROR_OK;
2883
2884 if (target->state == TARGET_RUNNING) {
2885 uint32_t request;
2886 uint32_t dscr;
2887 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2888 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2889
2890 /* check if we have data */
2891 int64_t then = timeval_ms();
2892 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2893 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2894 armv7a->debug_base + CPUDBG_DTRTX, &request);
2895 if (retval == ERROR_OK) {
2896 target_request(target, request);
2897 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2898 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2899 }
2900 if (timeval_ms() > then + 1000) {
2901 LOG_ERROR("Timeout waiting for dtr tx full");
2902 return ERROR_FAIL;
2903 }
2904 }
2905 }
2906
2907 return ERROR_OK;
2908 }
2909
2910 /*
2911 * Cortex-A target information and configuration
2912 */
2913
2914 static int cortex_a_examine_first(struct target *target)
2915 {
2916 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2917 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2918 struct adiv5_dap *swjdp = armv7a->arm.dap;
2919
2920 int i;
2921 int retval = ERROR_OK;
2922 uint32_t didr, cpuid, dbg_osreg;
2923
2924 retval = dap_dp_init(swjdp);
2925 if (retval != ERROR_OK) {
2926 LOG_ERROR("Could not initialize the debug port");
2927 return retval;
2928 }
2929
2930 /* Search for the APB-AP - it is needed for access to debug registers */
2931 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2932 if (retval != ERROR_OK) {
2933 LOG_ERROR("Could not find APB-AP for debug access");
2934 return retval;
2935 }
2936
2937 retval = mem_ap_init(armv7a->debug_ap);
2938 if (retval != ERROR_OK) {
2939 LOG_ERROR("Could not initialize the APB-AP");
2940 return retval;
2941 }
2942
2943 armv7a->debug_ap->memaccess_tck = 80;
2944
2945 /* Search for the AHB-AB.
2946 * REVISIT: We should search for AXI-AP as well and make sure the AP's MEMTYPE says it
2947 * can access system memory. */
2948 armv7a->memory_ap_available = false;
2949 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
2950 if (retval == ERROR_OK) {
2951 retval = mem_ap_init(armv7a->memory_ap);
2952 if (retval == ERROR_OK)
2953 armv7a->memory_ap_available = true;
2954 }
2955 if (retval != ERROR_OK) {
2956 /* AHB-AP not found or unavailable - use the CPU */
2957 LOG_DEBUG("No AHB-AP available for memory access");
2958 }
2959
2960 if (!target->dbgbase_set) {
2961 uint32_t dbgbase;
2962 /* Get ROM Table base */
2963 uint32_t apid;
2964 int32_t coreidx = target->coreid;
2965 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2966 target->cmd_name);
2967 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
2968 if (retval != ERROR_OK)
2969 return retval;
2970 /* Lookup 0x15 -- Processor DAP */
2971 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
2972 &armv7a->debug_base, &coreidx);
2973 if (retval != ERROR_OK) {
2974 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2975 target->cmd_name);
2976 return retval;
2977 }
2978 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2979 target->coreid, armv7a->debug_base);
2980 } else
2981 armv7a->debug_base = target->dbgbase;
2982
2983 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2984 armv7a->debug_base + CPUDBG_DIDR, &didr);
2985 if (retval != ERROR_OK) {
2986 LOG_DEBUG("Examine %s failed", "DIDR");
2987 return retval;
2988 }
2989
2990 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2991 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2992 if (retval != ERROR_OK) {
2993 LOG_DEBUG("Examine %s failed", "CPUID");
2994 return retval;
2995 }
2996
2997 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2998 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2999
3000 cortex_a->didr = didr;
3001 cortex_a->cpuid = cpuid;
3002
3003 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3004 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
3005 if (retval != ERROR_OK)
3006 return retval;
3007 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
3008
3009 if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
3010 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
3011 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
3012 return ERROR_TARGET_INIT_FAILED;
3013 }
3014
3015 if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
3016 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
3017
3018 /* Read DBGOSLSR and check if OSLK is implemented */
3019 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3020 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
3021 if (retval != ERROR_OK)
3022 return retval;
3023 LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
3024
3025 /* check if OS Lock is implemented */
3026 if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
3027 /* check if OS Lock is set */
3028 if (dbg_osreg & OSLSR_OSLK) {
3029 LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
3030
3031 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
3032 armv7a->debug_base + CPUDBG_OSLAR,
3033 0);
3034 if (retval == ERROR_OK)
3035 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3036 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
3037
3038 /* if we fail to access the register or cannot reset the OSLK bit, bail out */
3039 if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
3040 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
3041 target->coreid);
3042 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
3043 return ERROR_TARGET_INIT_FAILED;
3044 }
3045 }
3046 }
3047
3048 armv7a->arm.core_type = ARM_MODE_MON;
3049
3050 /* Avoid recreating the registers cache */
3051 if (!target_was_examined(target)) {
3052 retval = cortex_a_dpm_setup(cortex_a, didr);
3053 if (retval != ERROR_OK)
3054 return retval;
3055 }
3056
3057 /* Setup Breakpoint Register Pairs */
3058 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3059 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3060 cortex_a->brp_num_available = cortex_a->brp_num;
3061 free(cortex_a->brp_list);
3062 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3063 /* cortex_a->brb_enabled = ????; */
3064 for (i = 0; i < cortex_a->brp_num; i++) {
3065 cortex_a->brp_list[i].used = 0;
3066 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3067 cortex_a->brp_list[i].type = BRP_NORMAL;
3068 else
3069 cortex_a->brp_list[i].type = BRP_CONTEXT;
3070 cortex_a->brp_list[i].value = 0;
3071 cortex_a->brp_list[i].control = 0;
3072 cortex_a->brp_list[i].BRPn = i;
3073 }
3074
3075 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3076
3077 /* select debug_ap as default */
3078 swjdp->apsel = armv7a->debug_ap->ap_num;
3079
3080 target_set_examined(target);
3081 return ERROR_OK;
3082 }
3083
3084 static int cortex_a_examine(struct target *target)
3085 {
3086 int retval = ERROR_OK;
3087
3088 /* Reestablish communication after target reset */
3089 retval = cortex_a_examine_first(target);
3090
3091 /* Configure core debug access */
3092 if (retval == ERROR_OK)
3093 retval = cortex_a_init_debug_access(target);
3094
3095 return retval;
3096 }
3097
3098 /*
3099 * Cortex-A target creation and initialization
3100 */
3101
3102 static int cortex_a_init_target(struct command_context *cmd_ctx,
3103 struct target *target)
3104 {
3105 /* examine_first() does a bunch of this */
3106 arm_semihosting_init(target);
3107 return ERROR_OK;
3108 }
3109
3110 static int cortex_a_init_arch_info(struct target *target,
3111 struct cortex_a_common *cortex_a, struct jtag_tap *tap)
3112 {
3113 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3114
3115 /* Setup struct cortex_a_common */
3116 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3117
3118 /* tap has no dap initialized */
3119 if (!tap->dap) {
3120 tap->dap = dap_init();
3121
3122 /* Leave (only) generic DAP stuff for debugport_init() */
3123 tap->dap->tap = tap;
3124 }
3125
3126 armv7a->arm.dap = tap->dap;
3127
3128 cortex_a->fast_reg_read = 0;
3129
3130 /* register arch-specific functions */
3131 armv7a->examine_debug_reason = NULL;
3132
3133 armv7a->post_debug_entry = cortex_a_post_debug_entry;
3134
3135 armv7a->pre_restore_context = NULL;
3136
3137 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3138
3139
3140 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
3141
3142 /* REVISIT v7a setup should be in a v7a-specific routine */
3143 armv7a_init_arch_info(target, armv7a);
3144 target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
3145
3146 return ERROR_OK;
3147 }
3148
3149 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3150 {
3151 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3152
3153 cortex_a->armv7a_common.is_armv7r = false;
3154
3155 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3156 }
3157
3158 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3159 {
3160 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3161
3162 cortex_a->armv7a_common.is_armv7r = true;
3163
3164 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3165 }
3166
3167 static void cortex_a_deinit_target(struct target *target)
3168 {
3169 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3170 struct arm_dpm *dpm = &cortex_a->armv7a_common.dpm;
3171
3172 free(cortex_a->brp_list);
3173 free(dpm->dbp);
3174 free(dpm->dwp);
3175 free(cortex_a);
3176 }
3177
3178 static int cortex_a_mmu(struct target *target, int *enabled)
3179 {
3180 struct armv7a_common *armv7a = target_to_armv7a(target);
3181
3182 if (target->state != TARGET_HALTED) {
3183 LOG_ERROR("%s: target not halted", __func__);
3184 return ERROR_TARGET_INVALID;
3185 }
3186
3187 if (armv7a->is_armv7r)
3188 *enabled = 0;
3189 else
3190 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3191
3192 return ERROR_OK;
3193 }
3194
3195 static int cortex_a_virt2phys(struct target *target,
3196 uint32_t virt, uint32_t *phys)
3197 {
3198 int retval = ERROR_FAIL;
3199 struct armv7a_common *armv7a = target_to_armv7a(target);
3200 struct adiv5_dap *swjdp = armv7a->arm.dap;
3201 uint8_t apsel = swjdp->apsel;
3202 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num)) {
3203 uint32_t ret;
3204 retval = armv7a_mmu_translate_va(target,
3205 virt, &ret);
3206 if (retval != ERROR_OK)
3207 goto done;
3208 *phys = ret;
3209 } else {/* use this method if armv7a->memory_ap not selected
3210 * mmu must be enable in order to get a correct translation */
3211 retval = cortex_a_mmu_modify(target, 1);
3212 if (retval != ERROR_OK)
3213 goto done;
3214 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
3215 }
3216 done:
3217 return retval;
3218 }
3219
3220 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3221 {
3222 struct target *target = get_current_target(CMD_CTX);
3223 struct armv7a_common *armv7a = target_to_armv7a(target);
3224
3225 return armv7a_handle_cache_info_command(CMD_CTX,
3226 &armv7a->armv7a_mmu.armv7a_cache);
3227 }
3228
3229
3230 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3231 {
3232 struct target *target = get_current_target(CMD_CTX);
3233 if (!target_was_examined(target)) {
3234 LOG_ERROR("target not examined yet");
3235 return ERROR_FAIL;
3236 }
3237
3238 return cortex_a_init_debug_access(target);
3239 }
3240 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
3241 {
3242 struct target *target = get_current_target(CMD_CTX);
3243 /* check target is an smp target */
3244 struct target_list *head;
3245 struct target *curr;
3246 head = target->head;
3247 target->smp = 0;
3248 if (head != (struct target_list *)NULL) {
3249 while (head != (struct target_list *)NULL) {
3250 curr = head->target;
3251 curr->smp = 0;
3252 head = head->next;
3253 }
3254 /* fixes the target display to the debugger */
3255 target->gdb_service->target = target;
3256 }
3257 return ERROR_OK;
3258 }
3259
3260 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
3261 {
3262 struct target *target = get_current_target(CMD_CTX);
3263 struct target_list *head;
3264 struct target *curr;
3265 head = target->head;
3266 if (head != (struct target_list *)NULL) {
3267 target->smp = 1;
3268 while (head != (struct target_list *)NULL) {
3269 curr = head->target;
3270 curr->smp = 1;
3271 head = head->next;
3272 }
3273 }
3274 return ERROR_OK;
3275 }
3276
3277 COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
3278 {
3279 struct target *target = get_current_target(CMD_CTX);
3280 int retval = ERROR_OK;
3281 struct target_list *head;
3282 head = target->head;
3283 if (head != (struct target_list *)NULL) {
3284 if (CMD_ARGC == 1) {
3285 int coreid = 0;
3286 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
3287 if (ERROR_OK != retval)
3288 return retval;
3289 target->gdb_service->core[1] = coreid;
3290
3291 }
3292 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
3293 , target->gdb_service->core[1]);
3294 }
3295 return ERROR_OK;
3296 }
3297
3298 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3299 {
3300 struct target *target = get_current_target(CMD_CTX);
3301 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3302
3303 static const Jim_Nvp nvp_maskisr_modes[] = {
3304 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3305 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3306 { .name = NULL, .value = -1 },
3307 };
3308 const Jim_Nvp *n;
3309
3310 if (CMD_ARGC > 0) {
3311 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3312 if (n->name == NULL) {
3313 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3314 return ERROR_COMMAND_SYNTAX_ERROR;
3315 }
3316
3317 cortex_a->isrmasking_mode = n->value;
3318 }
3319
3320 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3321 command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
3322
3323 return ERROR_OK;
3324 }
3325
3326 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3327 {
3328 struct target *target = get_current_target(CMD_CTX);
3329 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3330
3331 static const Jim_Nvp nvp_dacrfixup_modes[] = {
3332 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3333 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3334 { .name = NULL, .value = -1 },
3335 };
3336 const Jim_Nvp *n;
3337
3338 if (CMD_ARGC > 0) {
3339 n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3340 if (n->name == NULL)
3341 return ERROR_COMMAND_SYNTAX_ERROR;
3342 cortex_a->dacrfixup_mode = n->value;
3343
3344 }
3345
3346 n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3347 command_print(CMD_CTX, "cortex_a domain access control fixup %s", n->name);
3348
3349 return ERROR_OK;
3350 }
3351
/* Sub-commands of the "cortex_a" command group. */
static const struct command_registration cortex_a_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{   .name = "smp_off",
	    .handler = cortex_a_handle_smp_off_command,
	    .mode = COMMAND_EXEC,
	    .help = "Stop smp handling",
	    .usage = "",},
	{
		.name = "smp_on",
		.handler = cortex_a_handle_smp_on_command,
		.mode = COMMAND_EXEC,
		.help = "Restart smp handling",
		.usage = "",
	},
	{
		.name = "smp_gdb",
		.handler = cortex_a_handle_smp_gdb_command,
		.mode = COMMAND_EXEC,
		.help = "display/fix current core played to gdb",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask cortex_a interrupts",
		.usage = "['on'|'off']",
	},
	{
		.name = "dacrfixup",
		.handler = handle_cortex_a_dacrfixup_command,
		.mode = COMMAND_EXEC,
		.help = "set domain access control (DACR) to all-manager "
			"on memory access",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for Cortex-A targets: chains the
 * generic ARM and ARMv7-A commands plus the "cortex_a" group above. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3420
/* Target vtable for ARMv7-A Cortex-A cores (MMU-capable; supports
 * virtual and physical memory access). */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* virtual-address memory access (MMU-aware) */
	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.read_buffer = cortex_a_read_buffer,
	.write_buffer = cortex_a_write_buffer,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* watchpoints not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,

	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
3467
/* Sub-commands of the "cortex_r4" command group (no SMP commands:
 * subset of the Cortex-A group). */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_r4 interrupts",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for Cortex-R4 targets: chains the
 * generic ARM and ARMv7-A commands plus the "cortex_r4" group above. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3509
/* Target vtable for ARMv7-R Cortex-R4 cores.  No MMU, so plain memory
 * accesses are routed straight to the physical-memory handlers and no
 * mmu/virt2phys hooks are provided. */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* ARMv7-R: all addresses are physical */
	.read_memory = cortex_a_read_phys_memory,
	.write_memory = cortex_a_write_phys_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* watchpoints not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)