Convert to non-recursive make
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex-R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 * *
39 * Cortex-A8(tm) TRM, ARM DDI 0344H *
40 * Cortex-A9(tm) TRM, ARM DDI 0407F *
41 * Cortex-A4(tm) TRM, ARM DDI 0363E *
42 * Cortex-A15(tm)TRM, ARM DDI 0438C *
43 * *
44 ***************************************************************************/
45
46 #ifdef HAVE_CONFIG_H
47 #include "config.h"
48 #endif
49
50 #include "breakpoints.h"
51 #include "cortex_a.h"
52 #include "register.h"
53 #include "target_request.h"
54 #include "target_type.h"
55 #include "arm_opcodes.h"
56 #include "arm_semihosting.h"
57 #include "jtag/swd.h"
58 #include <helper/time_support.h>
59
60 static int cortex_a_poll(struct target *target);
61 static int cortex_a_debug_entry(struct target *target);
62 static int cortex_a_restore_context(struct target *target, bool bpwp);
63 static int cortex_a_set_breakpoint(struct target *target,
64 struct breakpoint *breakpoint, uint8_t matchmode);
65 static int cortex_a_set_context_breakpoint(struct target *target,
66 struct breakpoint *breakpoint, uint8_t matchmode);
67 static int cortex_a_set_hybrid_breakpoint(struct target *target,
68 struct breakpoint *breakpoint);
69 static int cortex_a_unset_breakpoint(struct target *target,
70 struct breakpoint *breakpoint);
71 static int cortex_a_dap_read_coreregister_u32(struct target *target,
72 uint32_t *value, int regnum);
73 static int cortex_a_dap_write_coreregister_u32(struct target *target,
74 uint32_t value, int regnum);
75 static int cortex_a_mmu(struct target *target, int *enabled);
76 static int cortex_a_mmu_modify(struct target *target, int enable);
77 static int cortex_a_virt2phys(struct target *target,
78 uint32_t virt, uint32_t *phys);
79 static int cortex_a_read_cpu_memory(struct target *target,
80 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
81
82
83 /* restore cp15_control_reg at resume */
84 static int cortex_a_restore_cp15_control_reg(struct target *target)
85 {
86 int retval = ERROR_OK;
87 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
88 struct armv7a_common *armv7a = target_to_armv7a(target);
89
90 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
91 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
92 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
93 retval = armv7a->arm.mcr(target, 15,
94 0, 0, /* op1, op2 */
95 1, 0, /* CRn, CRm */
96 cortex_a->cp15_control_reg);
97 }
98 return retval;
99 }
100
101 /*
102 * Set up ARM core for memory access.
103 * If !phys_access, switch to SVC mode and make sure MMU is on
104 * If phys_access, switch off mmu
105 */
106 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
107 {
108 struct armv7a_common *armv7a = target_to_armv7a(target);
109 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
110 int mmu_enabled = 0;
111
112 if (phys_access == 0) {
113 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
114 cortex_a_mmu(target, &mmu_enabled);
115 if (mmu_enabled)
116 cortex_a_mmu_modify(target, 1);
117 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
118 /* overwrite DACR to all-manager */
119 armv7a->arm.mcr(target, 15,
120 0, 0, 3, 0,
121 0xFFFFFFFF);
122 }
123 } else {
124 cortex_a_mmu(target, &mmu_enabled);
125 if (mmu_enabled)
126 cortex_a_mmu_modify(target, 0);
127 }
128 return ERROR_OK;
129 }
130
131 /*
132 * Restore ARM core after memory access.
133 * If !phys_access, switch to previous mode
134 * If phys_access, restore MMU setting
135 */
136 static int cortex_a_post_memaccess(struct target *target, int phys_access)
137 {
138 struct armv7a_common *armv7a = target_to_armv7a(target);
139 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
140
141 if (phys_access == 0) {
142 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
143 /* restore */
144 armv7a->arm.mcr(target, 15,
145 0, 0, 3, 0,
146 cortex_a->cp15_dacr_reg);
147 }
148 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
149 } else {
150 int mmu_enabled = 0;
151 cortex_a_mmu(target, &mmu_enabled);
152 if (mmu_enabled)
153 cortex_a_mmu_modify(target, 1);
154 }
155 return ERROR_OK;
156 }
157
158
159 /* modify cp15_control_reg in order to enable or disable mmu for :
160 * - virt2phys address conversion
161 * - read or write memory in phys or virt address */
162 static int cortex_a_mmu_modify(struct target *target, int enable)
163 {
164 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
165 struct armv7a_common *armv7a = target_to_armv7a(target);
166 int retval = ERROR_OK;
167 int need_write = 0;
168
169 if (enable) {
170 /* if mmu enabled at target stop and mmu not enable */
171 if (!(cortex_a->cp15_control_reg & 0x1U)) {
172 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
173 return ERROR_FAIL;
174 }
175 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
176 cortex_a->cp15_control_reg_curr |= 0x1U;
177 need_write = 1;
178 }
179 } else {
180 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
181 cortex_a->cp15_control_reg_curr &= ~0x1U;
182 need_write = 1;
183 }
184 }
185
186 if (need_write) {
187 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
188 enable ? "enable mmu" : "disable mmu",
189 cortex_a->cp15_control_reg_curr);
190
191 retval = armv7a->arm.mcr(target, 15,
192 0, 0, /* op1, op2 */
193 1, 0, /* CRn, CRm */
194 cortex_a->cp15_control_reg_curr);
195 }
196 return retval;
197 }
198
199 /*
200 * Cortex-A Basic debug access, very low level assumes state is saved
201 */
202 static int cortex_a_init_debug_access(struct target *target)
203 {
204 struct armv7a_common *armv7a = target_to_armv7a(target);
205 int retval;
206
207 /* lock memory-mapped access to debug registers to prevent
208 * software interference */
209 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
210 armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
211 if (retval != ERROR_OK)
212 return retval;
213
214 /* Disable cacheline fills and force cache write-through in debug state */
215 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
216 armv7a->debug_base + CPUDBG_DSCCR, 0);
217 if (retval != ERROR_OK)
218 return retval;
219
220 /* Disable TLB lookup and refill/eviction in debug state */
221 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
222 armv7a->debug_base + CPUDBG_DSMCR, 0);
223 if (retval != ERROR_OK)
224 return retval;
225
226 /* Enabling of instruction execution in debug mode is done in debug_entry code */
227
228 /* Resync breakpoint registers */
229
230 /* Since this is likely called from init or reset, update target state information*/
231 return cortex_a_poll(target);
232 }
233
234 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
235 {
236 /* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
237 * Writes final value of DSCR into *dscr. Pass force to force always
238 * reading DSCR at least once. */
239 struct armv7a_common *armv7a = target_to_armv7a(target);
240 int64_t then = timeval_ms();
241 while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
242 force = false;
243 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
244 armv7a->debug_base + CPUDBG_DSCR, dscr);
245 if (retval != ERROR_OK) {
246 LOG_ERROR("Could not read DSCR register");
247 return retval;
248 }
249 if (timeval_ms() > then + 1000) {
250 LOG_ERROR("Timeout waiting for InstrCompl=1");
251 return ERROR_FAIL;
252 }
253 }
254 return ERROR_OK;
255 }
256
257 /* To reduce needless round-trips, pass in a pointer to the current
258 * DSCR value. Initialize it to zero if you just need to know the
259 * value on return from this function; or DSCR_INSTR_COMP if you
260 * happen to know that no instruction is pending.
261 */
262 static int cortex_a_exec_opcode(struct target *target,
263 uint32_t opcode, uint32_t *dscr_p)
264 {
265 uint32_t dscr;
266 int retval;
267 struct armv7a_common *armv7a = target_to_armv7a(target);
268
269 dscr = dscr_p ? *dscr_p : 0;
270
271 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
272
273 /* Wait for InstrCompl bit to be set */
274 retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
275 if (retval != ERROR_OK)
276 return retval;
277
278 retval = mem_ap_write_u32(armv7a->debug_ap,
279 armv7a->debug_base + CPUDBG_ITR, opcode);
280 if (retval != ERROR_OK)
281 return retval;
282
283 int64_t then = timeval_ms();
284 do {
285 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
286 armv7a->debug_base + CPUDBG_DSCR, &dscr);
287 if (retval != ERROR_OK) {
288 LOG_ERROR("Could not read DSCR register");
289 return retval;
290 }
291 if (timeval_ms() > then + 1000) {
292 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
293 return ERROR_FAIL;
294 }
295 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
296
297 if (dscr_p)
298 *dscr_p = dscr;
299
300 return retval;
301 }
302
303 /**************************************************************************
304 Read core register with very few exec_opcode, fast but needs work_area.
305 This can cause problems with MMU active.
306 **************************************************************************/
307 static int cortex_a_read_regs_through_mem(struct target *target, uint32_t address,
308 uint32_t *regfile)
309 {
310 int retval = ERROR_OK;
311 struct armv7a_common *armv7a = target_to_armv7a(target);
312
313 retval = cortex_a_dap_read_coreregister_u32(target, regfile, 0);
314 if (retval != ERROR_OK)
315 return retval;
316 retval = cortex_a_dap_write_coreregister_u32(target, address, 0);
317 if (retval != ERROR_OK)
318 return retval;
319 retval = cortex_a_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
320 if (retval != ERROR_OK)
321 return retval;
322
323 retval = mem_ap_read_buf(armv7a->memory_ap,
324 (uint8_t *)(&regfile[1]), 4, 15, address);
325
326 return retval;
327 }
328
/* Read one core register (r0-r14 = 0-14, pc = 15, CPSR = 16, SPSR = 17)
 * through the DCC: an opcode moves the register into DTRTX, then the
 * debugger reads DTRTX over the debug AP.
 *
 * NOTE: an out-of-range regnum (> 17) currently returns ERROR_OK without
 * writing *value — callers must pass valid register numbers. */
static int cortex_a_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	if (reg > 17)
		return retval;

	if (reg < 15) {
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0"  0xEE00nE15 */
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else if (reg == 15) {
		/* "MOV r0, r15"; then move r0 to DCCTX */
		retval = cortex_a_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* "MRS r0, CPSR" or "MRS r0, SPSR"
		 * then move r0 to DCCTX
		 */
		/* reg & 1 selects CPSR (16) vs SPSR (17) */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX */
	int64_t then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
390
/* Write one core register (r0-r14 = 0-14, pc = 15, CPSR = 16, SPSR = 17)
 * through the DCC: the debugger deposits the value in DTRRX, then an
 * opcode moves it from the DCC into the destination register.
 *
 * NOTE: an out-of-range regnum (> 17) currently returns ERROR_OK after
 * only draining a stale DCCRX word — callers must pass valid numbers. */
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode  0xEE100E15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15) {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	} else if (Rd == 15) {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		/* Rd & 1 selects CPSR (16) vs SPSR (17) */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16) {
			retval = cortex_a_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
468
469 /* Write to memory mapped registers directly with no cache or mmu handling */
470 static int cortex_a_dap_write_memap_register_u32(struct target *target,
471 uint32_t address,
472 uint32_t value)
473 {
474 int retval;
475 struct armv7a_common *armv7a = target_to_armv7a(target);
476
477 retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
478
479 return retval;
480 }
481
482 /*
483 * Cortex-A implementation of Debug Programmer's Model
484 *
485 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
486 * so there's no need to poll for it before executing an instruction.
487 *
488 * NOTE that in several of these cases the "stall" mode might be useful.
489 * It'd let us queue a few operations together... prepare/finish might
490 * be the places to enable/disable that mode.
491 */
492
/* Map a generic arm_dpm pointer back to the cortex_a_common that embeds it. */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
497
498 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
499 {
500 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
501 return mem_ap_write_u32(a->armv7a_common.debug_ap,
502 a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
503 }
504
505 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
506 uint32_t *dscr_p)
507 {
508 uint32_t dscr = DSCR_INSTR_COMP;
509 int retval;
510
511 if (dscr_p)
512 dscr = *dscr_p;
513
514 /* Wait for DTRRXfull */
515 int64_t then = timeval_ms();
516 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
517 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
518 a->armv7a_common.debug_base + CPUDBG_DSCR,
519 &dscr);
520 if (retval != ERROR_OK)
521 return retval;
522 if (timeval_ms() > then + 1000) {
523 LOG_ERROR("Timeout waiting for read dcc");
524 return ERROR_FAIL;
525 }
526 }
527
528 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
529 a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
530 if (retval != ERROR_OK)
531 return retval;
532 /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
533
534 if (dscr_p)
535 *dscr_p = dscr;
536
537 return retval;
538 }
539
540 static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
541 {
542 struct cortex_a_common *a = dpm_to_a(dpm);
543 uint32_t dscr;
544 int retval;
545
546 /* set up invariant: INSTR_COMP is set after ever DPM operation */
547 int64_t then = timeval_ms();
548 for (;; ) {
549 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
550 a->armv7a_common.debug_base + CPUDBG_DSCR,
551 &dscr);
552 if (retval != ERROR_OK)
553 return retval;
554 if ((dscr & DSCR_INSTR_COMP) != 0)
555 break;
556 if (timeval_ms() > then + 1000) {
557 LOG_ERROR("Timeout waiting for dpm prepare");
558 return ERROR_FAIL;
559 }
560 }
561
562 /* this "should never happen" ... */
563 if (dscr & DSCR_DTR_RX_FULL) {
564 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
565 /* Clear DCCRX */
566 retval = cortex_a_exec_opcode(
567 a->armv7a_common.arm.target,
568 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
569 &dscr);
570 if (retval != ERROR_OK)
571 return retval;
572 }
573
574 return retval;
575 }
576
577 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
578 {
579 /* REVISIT what could be done here? */
580 return ERROR_OK;
581 }
582
583 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
584 uint32_t opcode, uint32_t data)
585 {
586 struct cortex_a_common *a = dpm_to_a(dpm);
587 int retval;
588 uint32_t dscr = DSCR_INSTR_COMP;
589
590 retval = cortex_a_write_dcc(a, data);
591 if (retval != ERROR_OK)
592 return retval;
593
594 return cortex_a_exec_opcode(
595 a->armv7a_common.arm.target,
596 opcode,
597 &dscr);
598 }
599
600 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
601 uint32_t opcode, uint32_t data)
602 {
603 struct cortex_a_common *a = dpm_to_a(dpm);
604 uint32_t dscr = DSCR_INSTR_COMP;
605 int retval;
606
607 retval = cortex_a_write_dcc(a, data);
608 if (retval != ERROR_OK)
609 return retval;
610
611 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
612 retval = cortex_a_exec_opcode(
613 a->armv7a_common.arm.target,
614 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
615 &dscr);
616 if (retval != ERROR_OK)
617 return retval;
618
619 /* then the opcode, taking data from R0 */
620 retval = cortex_a_exec_opcode(
621 a->armv7a_common.arm.target,
622 opcode,
623 &dscr);
624
625 return retval;
626 }
627
628 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
629 {
630 struct target *target = dpm->arm->target;
631 uint32_t dscr = DSCR_INSTR_COMP;
632
633 /* "Prefetch flush" after modifying execution status in CPSR */
634 return cortex_a_exec_opcode(target,
635 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
636 &dscr);
637 }
638
639 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
640 uint32_t opcode, uint32_t *data)
641 {
642 struct cortex_a_common *a = dpm_to_a(dpm);
643 int retval;
644 uint32_t dscr = DSCR_INSTR_COMP;
645
646 /* the opcode, writing data to DCC */
647 retval = cortex_a_exec_opcode(
648 a->armv7a_common.arm.target,
649 opcode,
650 &dscr);
651 if (retval != ERROR_OK)
652 return retval;
653
654 return cortex_a_read_dcc(a, data, &dscr);
655 }
656
657
658 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
659 uint32_t opcode, uint32_t *data)
660 {
661 struct cortex_a_common *a = dpm_to_a(dpm);
662 uint32_t dscr = DSCR_INSTR_COMP;
663 int retval;
664
665 /* the opcode, writing data to R0 */
666 retval = cortex_a_exec_opcode(
667 a->armv7a_common.arm.target,
668 opcode,
669 &dscr);
670 if (retval != ERROR_OK)
671 return retval;
672
673 /* write R0 to DCC */
674 retval = cortex_a_exec_opcode(
675 a->armv7a_common.arm.target,
676 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
677 &dscr);
678 if (retval != ERROR_OK)
679 return retval;
680
681 return cortex_a_read_dcc(a, data, &dscr);
682 }
683
684 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
685 uint32_t addr, uint32_t control)
686 {
687 struct cortex_a_common *a = dpm_to_a(dpm);
688 uint32_t vr = a->armv7a_common.debug_base;
689 uint32_t cr = a->armv7a_common.debug_base;
690 int retval;
691
692 switch (index_t) {
693 case 0 ... 15: /* breakpoints */
694 vr += CPUDBG_BVR_BASE;
695 cr += CPUDBG_BCR_BASE;
696 break;
697 case 16 ... 31: /* watchpoints */
698 vr += CPUDBG_WVR_BASE;
699 cr += CPUDBG_WCR_BASE;
700 index_t -= 16;
701 break;
702 default:
703 return ERROR_FAIL;
704 }
705 vr += 4 * index_t;
706 cr += 4 * index_t;
707
708 LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
709 (unsigned) vr, (unsigned) cr);
710
711 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
712 vr, addr);
713 if (retval != ERROR_OK)
714 return retval;
715 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
716 cr, control);
717 return retval;
718 }
719
720 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
721 {
722 struct cortex_a_common *a = dpm_to_a(dpm);
723 uint32_t cr;
724
725 switch (index_t) {
726 case 0 ... 15:
727 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
728 break;
729 case 16 ... 31:
730 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
731 index_t -= 16;
732 break;
733 default:
734 return ERROR_FAIL;
735 }
736 cr += 4 * index_t;
737
738 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
739
740 /* clear control register */
741 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
742 }
743
744 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
745 {
746 struct arm_dpm *dpm = &a->armv7a_common.dpm;
747 int retval;
748
749 dpm->arm = &a->armv7a_common.arm;
750 dpm->didr = didr;
751
752 dpm->prepare = cortex_a_dpm_prepare;
753 dpm->finish = cortex_a_dpm_finish;
754
755 dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
756 dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
757 dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
758
759 dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
760 dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
761
762 dpm->bpwp_enable = cortex_a_bpwp_enable;
763 dpm->bpwp_disable = cortex_a_bpwp_disable;
764
765 retval = arm_dpm_setup(dpm);
766 if (retval == ERROR_OK)
767 retval = arm_dpm_initialize(dpm);
768
769 return retval;
770 }
771 static struct target *get_cortex_a(struct target *target, int32_t coreid)
772 {
773 struct target_list *head;
774 struct target *curr;
775
776 head = target->head;
777 while (head != (struct target_list *)NULL) {
778 curr = head->target;
779 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
780 return curr;
781 head = head->next;
782 }
783 return target;
784 }
785 static int cortex_a_halt(struct target *target);
786
787 static int cortex_a_halt_smp(struct target *target)
788 {
789 int retval = 0;
790 struct target_list *head;
791 struct target *curr;
792 head = target->head;
793 while (head != (struct target_list *)NULL) {
794 curr = head->target;
795 if ((curr != target) && (curr->state != TARGET_HALTED)
796 && target_was_examined(curr))
797 retval += cortex_a_halt(curr);
798 head = head->next;
799 }
800 return retval;
801 }
802
803 static int update_halt_gdb(struct target *target)
804 {
805 int retval = 0;
806 if (target->gdb_service && target->gdb_service->core[0] == -1) {
807 target->gdb_service->target = target;
808 target->gdb_service->core[0] = target->coreid;
809 retval += cortex_a_halt_smp(target);
810 }
811 return retval;
812 }
813
814 /*
815 * Cortex-A Run control
816 */
817
/* Periodic poll: read DSCR, derive the target state from the core's
 * halted/restarted bits, and fire halt events (including SMP fan-out
 * and gdb core switching) on a running->halted transition. */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for later inspection (e.g. debug reason) */
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					/* halt the rest of the SMP group too */
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				/* non-zero means a semihosting request was
				 * handled (or failed); don't report HALTED */
				if (arm_semihosting(target, &retval) != 0)
					return retval;

				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
891
892 static int cortex_a_halt(struct target *target)
893 {
894 int retval = ERROR_OK;
895 uint32_t dscr;
896 struct armv7a_common *armv7a = target_to_armv7a(target);
897
898 /*
899 * Tell the core to be halted by writing DRCR with 0x1
900 * and then wait for the core to be halted.
901 */
902 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
903 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
904 if (retval != ERROR_OK)
905 return retval;
906
907 /*
908 * enter halting debug mode
909 */
910 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
911 armv7a->debug_base + CPUDBG_DSCR, &dscr);
912 if (retval != ERROR_OK)
913 return retval;
914
915 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
916 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
917 if (retval != ERROR_OK)
918 return retval;
919
920 int64_t then = timeval_ms();
921 for (;; ) {
922 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
923 armv7a->debug_base + CPUDBG_DSCR, &dscr);
924 if (retval != ERROR_OK)
925 return retval;
926 if ((dscr & DSCR_CORE_HALTED) != 0)
927 break;
928 if (timeval_ms() > then + 1000) {
929 LOG_ERROR("Timeout waiting for halt");
930 return ERROR_FAIL;
931 }
932 }
933
934 target->debug_reason = DBG_REASON_DBGRQ;
935
936 return ERROR_OK;
937 }
938
/* Prepare the core for resuming: fix up the resume PC for the current
 * core state, restore CP15 control and the register context, and mark
 * the target running.  Does NOT actually restart the core — that is
 * cortex_a_internal_restart()'s job. */
static int cortex_a_internal_restore(struct target *target, int current,
	uint32_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM state: PC must be word aligned */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
1033
/* Actually restart the core after cortex_a_internal_restore(): disable
 * ITR, issue DRCR restart (clearing sticky exceptions), and wait up to
 * one second for the core to report restarted. */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started.  Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* warn only; restart is attempted regardless */
	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1089
1090 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
1091 {
1092 int retval = 0;
1093 struct target_list *head;
1094 struct target *curr;
1095 uint32_t address;
1096 head = target->head;
1097 while (head != (struct target_list *)NULL) {
1098 curr = head->target;
1099 if ((curr != target) && (curr->state != TARGET_RUNNING)
1100 && target_was_examined(curr)) {
1101 /* resume current address , not in step mode */
1102 retval += cortex_a_internal_restore(curr, 1, &address,
1103 handle_breakpoints, 0);
1104 retval += cortex_a_internal_restart(curr);
1105 }
1106 head = head->next;
1107
1108 }
1109 return retval;
1110 }
1111
1112 static int cortex_a_resume(struct target *target, int current,
1113 uint32_t address, int handle_breakpoints, int debug_execution)
1114 {
1115 int retval = 0;
1116 /* dummy resume for smp toggle in order to reduce gdb impact */
1117 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1118 /* simulate a start and halt of target */
1119 target->gdb_service->target = NULL;
1120 target->gdb_service->core[0] = target->gdb_service->core[1];
1121 /* fake resume at next poll we play the target core[1], see poll*/
1122 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1123 return 0;
1124 }
1125 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1126 if (target->smp) {
1127 target->gdb_service->core[0] = -1;
1128 retval = cortex_a_restore_smp(target, handle_breakpoints);
1129 if (retval != ERROR_OK)
1130 return retval;
1131 }
1132 cortex_a_internal_restart(target);
1133
1134 if (!debug_execution) {
1135 target->state = TARGET_RUNNING;
1136 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1137 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1138 } else {
1139 target->state = TARGET_DEBUG_RUNNING;
1140 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1141 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1142 }
1143
1144 return ERROR_OK;
1145 }
1146
/* Bring the driver's view of the core up to date after a halt.
 *
 * Re-enables ITR execution, decodes the halt reason from the cached DSCR,
 * records WFAR for watchpoint halts, reads the core register file (either
 * one register at a time through the DCC, or in bulk through a working
 * area when fast_reg_read is set), and finally runs the architecture's
 * post_debug_entry hook.
 */
static int cortex_a_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, spsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);


	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
		/* slow path: read each register individually via the DPM */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	else {
		/* fast path: dump r0-r15 to target memory and read them back */
		retval = cortex_a_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
			return retval;

		/* read Current PSR */
		retval = cortex_a_dap_read_coreregister_u32(target, &cpsr, 16);
		/* store current cpsr */
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(arm, cpsr);

		/* update cache */
		for (i = 0; i <= ARM_PC; i++) {
			reg = arm_reg_current(arm, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: the captured PC is ahead of the
		 * halt address by the pipeline offset of the current state */
		if (cpsr & (1 << 5)) {
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		} else {
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = arm->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

	/* read Saved PSR */
	retval = cortex_a_dap_read_coreregister_u32(target, &spsr, 17);
	/* store current spsr */
	if (retval != ERROR_OK)
		return retval;

	reg = arm->spsr;
	buf_set_u32(reg->value, 0, 32, spsr);
	reg->valid = 1;
	reg->dirty = 0;

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1278
1279 static int cortex_a_post_debug_entry(struct target *target)
1280 {
1281 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1282 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1283 int retval;
1284
1285 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1286 retval = armv7a->arm.mrc(target, 15,
1287 0, 0, /* op1, op2 */
1288 1, 0, /* CRn, CRm */
1289 &cortex_a->cp15_control_reg);
1290 if (retval != ERROR_OK)
1291 return retval;
1292 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1293 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1294
1295 if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1296 armv7a_identify_cache(target);
1297
1298 if (armv7a->is_armv7r) {
1299 armv7a->armv7a_mmu.mmu_enabled = 0;
1300 } else {
1301 armv7a->armv7a_mmu.mmu_enabled =
1302 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1303 }
1304 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1305 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1306 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1307 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1308 cortex_a->curr_mode = armv7a->arm.core_mode;
1309
1310 /* switch to SVC mode to read DACR */
1311 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1312 armv7a->arm.mrc(target, 15,
1313 0, 0, 3, 0,
1314 &cortex_a->cp15_dacr_reg);
1315
1316 LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1317 cortex_a->cp15_dacr_reg);
1318
1319 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1320 return ERROR_OK;
1321 }
1322
1323 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1324 {
1325 struct armv7a_common *armv7a = target_to_armv7a(target);
1326 uint32_t dscr;
1327
1328 /* Read DSCR */
1329 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1330 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1331 if (ERROR_OK != retval)
1332 return retval;
1333
1334 /* clear bitfield */
1335 dscr &= ~bit_mask;
1336 /* put new value */
1337 dscr |= value & bit_mask;
1338
1339 /* write new DSCR */
1340 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1341 armv7a->debug_base + CPUDBG_DSCR, dscr);
1342 return retval;
1343 }
1344
1345 static int cortex_a_step(struct target *target, int current, uint32_t address,
1346 int handle_breakpoints)
1347 {
1348 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1349 struct armv7a_common *armv7a = target_to_armv7a(target);
1350 struct arm *arm = &armv7a->arm;
1351 struct breakpoint *breakpoint = NULL;
1352 struct breakpoint stepbreakpoint;
1353 struct reg *r;
1354 int retval;
1355
1356 if (target->state != TARGET_HALTED) {
1357 LOG_WARNING("target not halted");
1358 return ERROR_TARGET_NOT_HALTED;
1359 }
1360
1361 /* current = 1: continue on current pc, otherwise continue at <address> */
1362 r = arm->pc;
1363 if (!current)
1364 buf_set_u32(r->value, 0, 32, address);
1365 else
1366 address = buf_get_u32(r->value, 0, 32);
1367
1368 /* The front-end may request us not to handle breakpoints.
1369 * But since Cortex-A uses breakpoint for single step,
1370 * we MUST handle breakpoints.
1371 */
1372 handle_breakpoints = 1;
1373 if (handle_breakpoints) {
1374 breakpoint = breakpoint_find(target, address);
1375 if (breakpoint)
1376 cortex_a_unset_breakpoint(target, breakpoint);
1377 }
1378
1379 /* Setup single step breakpoint */
1380 stepbreakpoint.address = address;
1381 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1382 ? 2 : 4;
1383 stepbreakpoint.type = BKPT_HARD;
1384 stepbreakpoint.set = 0;
1385
1386 /* Disable interrupts during single step if requested */
1387 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1388 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1389 if (ERROR_OK != retval)
1390 return retval;
1391 }
1392
1393 /* Break on IVA mismatch */
1394 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1395
1396 target->debug_reason = DBG_REASON_SINGLESTEP;
1397
1398 retval = cortex_a_resume(target, 1, address, 0, 0);
1399 if (retval != ERROR_OK)
1400 return retval;
1401
1402 int64_t then = timeval_ms();
1403 while (target->state != TARGET_HALTED) {
1404 retval = cortex_a_poll(target);
1405 if (retval != ERROR_OK)
1406 return retval;
1407 if (timeval_ms() > then + 1000) {
1408 LOG_ERROR("timeout waiting for target halt");
1409 return ERROR_FAIL;
1410 }
1411 }
1412
1413 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1414
1415 /* Re-enable interrupts if they were disabled */
1416 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1417 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1418 if (ERROR_OK != retval)
1419 return retval;
1420 }
1421
1422
1423 target->debug_reason = DBG_REASON_BREAKPOINT;
1424
1425 if (breakpoint)
1426 cortex_a_set_breakpoint(target, breakpoint, 0);
1427
1428 if (target->state != TARGET_HALTED)
1429 LOG_DEBUG("target stepped");
1430
1431 return ERROR_OK;
1432 }
1433
1434 static int cortex_a_restore_context(struct target *target, bool bpwp)
1435 {
1436 struct armv7a_common *armv7a = target_to_armv7a(target);
1437
1438 LOG_DEBUG(" ");
1439
1440 if (armv7a->pre_restore_context)
1441 armv7a->pre_restore_context(target);
1442
1443 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1444 }
1445
1446 /*
1447 * Cortex-A Breakpoint and watchpoint functions
1448 */
1449
1450 /* Setup hardware Breakpoint Register Pair */
1451 static int cortex_a_set_breakpoint(struct target *target,
1452 struct breakpoint *breakpoint, uint8_t matchmode)
1453 {
1454 int retval;
1455 int brp_i = 0;
1456 uint32_t control;
1457 uint8_t byte_addr_select = 0x0F;
1458 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1459 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1460 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1461
1462 if (breakpoint->set) {
1463 LOG_WARNING("breakpoint already set");
1464 return ERROR_OK;
1465 }
1466
1467 if (breakpoint->type == BKPT_HARD) {
1468 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1469 brp_i++;
1470 if (brp_i >= cortex_a->brp_num) {
1471 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1472 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1473 }
1474 breakpoint->set = brp_i + 1;
1475 if (breakpoint->length == 2)
1476 byte_addr_select = (3 << (breakpoint->address & 0x02));
1477 control = ((matchmode & 0x7) << 20)
1478 | (byte_addr_select << 5)
1479 | (3 << 1) | 1;
1480 brp_list[brp_i].used = 1;
1481 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1482 brp_list[brp_i].control = control;
1483 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1484 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1485 brp_list[brp_i].value);
1486 if (retval != ERROR_OK)
1487 return retval;
1488 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1489 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1490 brp_list[brp_i].control);
1491 if (retval != ERROR_OK)
1492 return retval;
1493 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1494 brp_list[brp_i].control,
1495 brp_list[brp_i].value);
1496 } else if (breakpoint->type == BKPT_SOFT) {
1497 uint8_t code[4];
1498 if (breakpoint->length == 2)
1499 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1500 else
1501 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1502 retval = target_read_memory(target,
1503 breakpoint->address & 0xFFFFFFFE,
1504 breakpoint->length, 1,
1505 breakpoint->orig_instr);
1506 if (retval != ERROR_OK)
1507 return retval;
1508
1509 /* make sure data cache is cleaned & invalidated down to PoC */
1510 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1511 armv7a_cache_flush_virt(target, breakpoint->address,
1512 breakpoint->length);
1513 }
1514
1515 retval = target_write_memory(target,
1516 breakpoint->address & 0xFFFFFFFE,
1517 breakpoint->length, 1, code);
1518 if (retval != ERROR_OK)
1519 return retval;
1520
1521 /* update i-cache at breakpoint location */
1522 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1523 breakpoint->length);
1524 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1525 breakpoint->length);
1526
1527 breakpoint->set = 0x11; /* Any nice value but 0 */
1528 }
1529
1530 return ERROR_OK;
1531 }
1532
1533 static int cortex_a_set_context_breakpoint(struct target *target,
1534 struct breakpoint *breakpoint, uint8_t matchmode)
1535 {
1536 int retval = ERROR_FAIL;
1537 int brp_i = 0;
1538 uint32_t control;
1539 uint8_t byte_addr_select = 0x0F;
1540 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1541 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1542 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1543
1544 if (breakpoint->set) {
1545 LOG_WARNING("breakpoint already set");
1546 return retval;
1547 }
1548 /*check available context BRPs*/
1549 while ((brp_list[brp_i].used ||
1550 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1551 brp_i++;
1552
1553 if (brp_i >= cortex_a->brp_num) {
1554 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1555 return ERROR_FAIL;
1556 }
1557
1558 breakpoint->set = brp_i + 1;
1559 control = ((matchmode & 0x7) << 20)
1560 | (byte_addr_select << 5)
1561 | (3 << 1) | 1;
1562 brp_list[brp_i].used = 1;
1563 brp_list[brp_i].value = (breakpoint->asid);
1564 brp_list[brp_i].control = control;
1565 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1566 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1567 brp_list[brp_i].value);
1568 if (retval != ERROR_OK)
1569 return retval;
1570 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1571 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1572 brp_list[brp_i].control);
1573 if (retval != ERROR_OK)
1574 return retval;
1575 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1576 brp_list[brp_i].control,
1577 brp_list[brp_i].value);
1578 return ERROR_OK;
1579
1580 }
1581
1582 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1583 {
1584 int retval = ERROR_FAIL;
1585 int brp_1 = 0; /* holds the contextID pair */
1586 int brp_2 = 0; /* holds the IVA pair */
1587 uint32_t control_CTX, control_IVA;
1588 uint8_t CTX_byte_addr_select = 0x0F;
1589 uint8_t IVA_byte_addr_select = 0x0F;
1590 uint8_t CTX_machmode = 0x03;
1591 uint8_t IVA_machmode = 0x01;
1592 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1593 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1594 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1595
1596 if (breakpoint->set) {
1597 LOG_WARNING("breakpoint already set");
1598 return retval;
1599 }
1600 /*check available context BRPs*/
1601 while ((brp_list[brp_1].used ||
1602 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1603 brp_1++;
1604
1605 printf("brp(CTX) found num: %d\n", brp_1);
1606 if (brp_1 >= cortex_a->brp_num) {
1607 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1608 return ERROR_FAIL;
1609 }
1610
1611 while ((brp_list[brp_2].used ||
1612 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1613 brp_2++;
1614
1615 printf("brp(IVA) found num: %d\n", brp_2);
1616 if (brp_2 >= cortex_a->brp_num) {
1617 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1618 return ERROR_FAIL;
1619 }
1620
1621 breakpoint->set = brp_1 + 1;
1622 breakpoint->linked_BRP = brp_2;
1623 control_CTX = ((CTX_machmode & 0x7) << 20)
1624 | (brp_2 << 16)
1625 | (0 << 14)
1626 | (CTX_byte_addr_select << 5)
1627 | (3 << 1) | 1;
1628 brp_list[brp_1].used = 1;
1629 brp_list[brp_1].value = (breakpoint->asid);
1630 brp_list[brp_1].control = control_CTX;
1631 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1632 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1633 brp_list[brp_1].value);
1634 if (retval != ERROR_OK)
1635 return retval;
1636 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1637 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1638 brp_list[brp_1].control);
1639 if (retval != ERROR_OK)
1640 return retval;
1641
1642 control_IVA = ((IVA_machmode & 0x7) << 20)
1643 | (brp_1 << 16)
1644 | (IVA_byte_addr_select << 5)
1645 | (3 << 1) | 1;
1646 brp_list[brp_2].used = 1;
1647 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1648 brp_list[brp_2].control = control_IVA;
1649 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1650 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1651 brp_list[brp_2].value);
1652 if (retval != ERROR_OK)
1653 return retval;
1654 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1655 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1656 brp_list[brp_2].control);
1657 if (retval != ERROR_OK)
1658 return retval;
1659
1660 return ERROR_OK;
1661 }
1662
/* Remove a previously-set breakpoint.
 *
 * Hardware breakpoints: clear the BCR first (disarm) and then the BVR of
 * the claimed BRP; a hybrid breakpoint (address and asid both non-zero)
 * additionally tears down the linked IVA pair recorded in linked_BRP.
 * Software breakpoints: restore the saved original instruction and
 * invalidate the caches at that address.
 */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* hybrid breakpoints use a context pair plus a linked IVA pair */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* disarm by clearing BCR before clearing BVR */
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: single BRP teardown */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
						breakpoint->length);
		}

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
					breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
					breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1778
1779 static int cortex_a_add_breakpoint(struct target *target,
1780 struct breakpoint *breakpoint)
1781 {
1782 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1783
1784 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1785 LOG_INFO("no hardware breakpoint available");
1786 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1787 }
1788
1789 if (breakpoint->type == BKPT_HARD)
1790 cortex_a->brp_num_available--;
1791
1792 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1793 }
1794
1795 static int cortex_a_add_context_breakpoint(struct target *target,
1796 struct breakpoint *breakpoint)
1797 {
1798 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1799
1800 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1801 LOG_INFO("no hardware breakpoint available");
1802 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1803 }
1804
1805 if (breakpoint->type == BKPT_HARD)
1806 cortex_a->brp_num_available--;
1807
1808 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1809 }
1810
1811 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1812 struct breakpoint *breakpoint)
1813 {
1814 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1815
1816 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1817 LOG_INFO("no hardware breakpoint available");
1818 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1819 }
1820
1821 if (breakpoint->type == BKPT_HARD)
1822 cortex_a->brp_num_available--;
1823
1824 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1825 }
1826
1827
1828 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1829 {
1830 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1831
1832 #if 0
1833 /* It is perfectly possible to remove breakpoints while the target is running */
1834 if (target->state != TARGET_HALTED) {
1835 LOG_WARNING("target not halted");
1836 return ERROR_TARGET_NOT_HALTED;
1837 }
1838 #endif
1839
1840 if (breakpoint->set) {
1841 cortex_a_unset_breakpoint(target, breakpoint);
1842 if (breakpoint->type == BKPT_HARD)
1843 cortex_a->brp_num_available++;
1844 }
1845
1846
1847 return ERROR_OK;
1848 }
1849
1850 /*
1851 * Cortex-A Reset functions
1852 */
1853
/* Assert reset on the target.
 *
 * Prefers a user-supplied TARGET_EVENT_RESET_ASSERT handler; otherwise
 * pulses SRST when the adapter supports it.  May be called before the
 * target has been examined.  On success the register cache is invalidated
 * (examined targets only) and the state set to TARGET_RESET.
 */
static int cortex_a_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* This function can be called in "target not examined" state */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */

		/*
		 * FIXME: fix reset when transport is SWD. This is a temporary
		 * work-around for release v0.10 that is not intended to stay!
		 */
		if (transport_is_swd() ||
				(target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
			jtag_add_reset(0, 1);

	} else {
		/* no event handler and no SRST line: nothing we can do */
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target))
		register_cache_invalidate(armv7a->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1893
1894 static int cortex_a_deassert_reset(struct target *target)
1895 {
1896 int retval;
1897
1898 LOG_DEBUG(" ");
1899
1900 /* be certain SRST is off */
1901 jtag_add_reset(0, 0);
1902
1903 if (target_was_examined(target)) {
1904 retval = cortex_a_poll(target);
1905 if (retval != ERROR_OK)
1906 return retval;
1907 }
1908
1909 if (target->reset_halt) {
1910 if (target->state != TARGET_HALTED) {
1911 LOG_WARNING("%s: ran after reset and before halt ...",
1912 target_name(target));
1913 if (target_was_examined(target)) {
1914 retval = target_halt(target);
1915 if (retval != ERROR_OK)
1916 return retval;
1917 } else
1918 target->state = TARGET_UNKNOWN;
1919 }
1920 }
1921
1922 return ERROR_OK;
1923 }
1924
1925 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1926 {
1927 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1928 * New desired mode must be in mode. Current value of DSCR must be in
1929 * *dscr, which is updated with new value.
1930 *
1931 * This function elides actually sending the mode-change over the debug
1932 * interface if the mode is already set as desired.
1933 */
1934 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1935 if (new_dscr != *dscr) {
1936 struct armv7a_common *armv7a = target_to_armv7a(target);
1937 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1938 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1939 if (retval == ERROR_OK)
1940 *dscr = new_dscr;
1941 return retval;
1942 } else {
1943 return ERROR_OK;
1944 }
1945 }
1946
1947 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1948 uint32_t value, uint32_t *dscr)
1949 {
1950 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1951 struct armv7a_common *armv7a = target_to_armv7a(target);
1952 int64_t then = timeval_ms();
1953 int retval;
1954
1955 while ((*dscr & mask) != value) {
1956 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1957 armv7a->debug_base + CPUDBG_DSCR, dscr);
1958 if (retval != ERROR_OK)
1959 return retval;
1960 if (timeval_ms() > then + 1000) {
1961 LOG_ERROR("timeout waiting for DSCR bit change");
1962 return ERROR_FAIL;
1963 }
1964 }
1965 return ERROR_OK;
1966 }
1967
/* Read a coprocessor register via the DCC.
 *
 * Executes the supplied MRC-style opcode (which must deposit its result in
 * R0), moves R0 into DTRTX, waits for TXfull_l, then reads the value out of
 * DTRTX over the debug AP.  *dscr is the caller's cached DSCR and is kept
 * up to date by the helpers.  Clobbers the target's R0.
 */
static int cortex_a_read_copro(struct target *target, uint32_t opcode,
	uint32_t *data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Move from coprocessor to R0. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
	 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
	 * must also check TXfull_l). Most of the time this will be free
	 * because TXfull_l will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2001
2002 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
2003 uint32_t *dfsr, uint32_t *dscr)
2004 {
2005 int retval;
2006
2007 if (dfar) {
2008 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
2009 if (retval != ERROR_OK)
2010 return retval;
2011 }
2012
2013 if (dfsr) {
2014 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2015 if (retval != ERROR_OK)
2016 return retval;
2017 }
2018
2019 return ERROR_OK;
2020 }
2021
/* Write a coprocessor register via the DCC.
 *
 * Places the value in DTRRX over the debug AP, moves it into the target's
 * R0, executes the supplied MCR-style opcode (which must take its operand
 * from R0), then waits for RXfull_l to clear.  *dscr is the caller's cached
 * DSCR and is kept up to date by the helpers.  Clobbers the target's R0.
 */
static int cortex_a_write_copro(struct target *target, uint32_t opcode,
	uint32_t data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Write the value into DTRRX. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, data);
	if (retval != ERROR_OK)
		return retval;

	/* Move from DTRRX to R0. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to coprocessor. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2054
2055 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2056 uint32_t dfsr, uint32_t *dscr)
2057 {
2058 int retval;
2059
2060 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2061 if (retval != ERROR_OK)
2062 return retval;
2063
2064 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2065 if (retval != ERROR_OK)
2066 return retval;
2067
2068 return ERROR_OK;
2069 }
2070
2071 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2072 {
2073 uint32_t status, upper4;
2074
2075 if (dfsr & (1 << 9)) {
2076 /* LPAE format. */
2077 status = dfsr & 0x3f;
2078 upper4 = status >> 2;
2079 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2080 return ERROR_TARGET_TRANSLATION_FAULT;
2081 else if (status == 33)
2082 return ERROR_TARGET_UNALIGNED_ACCESS;
2083 else
2084 return ERROR_TARGET_DATA_ABORT;
2085 } else {
2086 /* Normal format. */
2087 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2088 if (status == 1)
2089 return ERROR_TARGET_UNALIGNED_ACCESS;
2090 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2091 status == 9 || status == 11 || status == 13 || status == 15)
2092 return ERROR_TARGET_TRANSLATION_FAULT;
2093 else
2094 return ERROR_TARGET_DATA_ABORT;
2095 }
2096 }
2097
static int cortex_a_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size size from *buffer. Old value of DSCR must
	 * be in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_write_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. One full DCC round trip (DTRRX -> R1 -> store)
	 * per element, which is what makes this the slow path. */
	while (count) {
		/* Write the value to store into DTRRX. */
		uint32_t data, opcode;
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* Transfer the value from DTRRX to R1. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Write the value transferred to R1 into memory. The store uses
		 * post-increment addressing, so R0 advances automatically. */
		if (size == 1)
			opcode = ARMV4_5_STRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_STRH_IP(1, 0);
		else
			opcode = ARMV4_5_STRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. The caller
		 * (cortex_a_write_cpu_memory) inspects the sticky abort bits in
		 * *dscr and turns them into an error code. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check RXfull_l). Most of the time this will be free
		 * because RXfull_l will be cleared immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2172
static int cortex_a_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size 4 from *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is fast but only works for
	 * word-sized objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	/* Switch to fast mode if not already in that mode. In fast mode the
	 * latched ITR instruction is re-executed on each DTRRX write, so the
	 * whole buffer can be streamed with one bulk AP transfer below. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Latch STC instruction (store one word from DTRRX to [R0], writeback,
	 * so R0 advances by 4 per execution). */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
	if (retval != ERROR_OK)
		return retval;

	/* Transfer all the data and issue all the instructions. Fault checking
	 * and the switch back to non-blocking mode are done by the caller. */
	return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
			4, count, armv7a->debug_base + CPUDBG_DTRRX);
}
2201
static int cortex_a_write_cpu_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* Write memory through the CPU: load the address into R0, then delegate
	 * to the fast (word-aligned, streamed STC) or slow (per-element DCC)
	 * helper. Afterwards, regardless of success, restore DCC state, decode
	 * any sticky abort into an error code, restore DFAR/DFSR clobbered by a
	 * synchronous fault, and drain the DCC. Requires a halted target. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
	}

	out:
	final_retval = retval;

	/* Cleanup path: executed on both success and failure; later errors only
	 * overwrite final_retval when no earlier error was recorded. */

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. However, don't do this
	 * if there is fault, because then the instruction might not have completed
	 * successfully. */
	if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
		/* NOTE(review): this early return bypasses the abort decoding,
		 * DFAR/DFSR restore, and DCC drain below -- verify this is intended
		 * if the wait itself fails. */
		if (retval != ERROR_OK)
			return retval;
	}

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		/* NOTE(review): return value of this clear is deliberately ignored;
		 * the fault is already being reported via fault_dscr. */
		mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		/* Discard stale data from the target-to-host channel. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		/* Consume stale host-to-target data into R1 (already marked dirty
		 * by the slow path; the fast path does not set this bit). */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2340
static int cortex_a_read_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size size into *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_read_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. One full DCC round trip (load -> R1 -> DTRTX)
	 * per element, which is what makes this the slow path. */
	while (count) {
		/* Issue a load of the appropriate size to R1. The load uses
		 * post-increment addressing, so R0 advances automatically. */
		uint32_t opcode, data;
		if (size == 1)
			opcode = ARMV4_5_LDRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_LDRH_IP(1, 0);
		else
			opcode = ARMV4_5_LDRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Issue a write of R1 to DTRTX. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. The caller
		 * (cortex_a_read_cpu_memory) inspects the sticky abort bits in
		 * *dscr and turns them into an error code. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check TXfull_l). Most of the time this will be free
		 * because TXfull_l will be set immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
				DSCR_DTRTX_FULL_LATCHED, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &data);
		if (retval != ERROR_OK)
			return retval;
		if (size == 1)
			*buffer = (uint8_t) data;
		else if (size == 2)
			target_buffer_set_u16(target, buffer, (uint16_t) data);
		else
			target_buffer_set_u32(target, buffer, data);

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2416
static int cortex_a_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
	 * *dscr; updated to new value. This is fast but only works for word-sized
	 * objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 * - count >= 1 (callers guarantee this via their !count check; count is
	 *   unsigned, so count == 0 would underflow at the decrement below).
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t u32;
	int retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Issue the LDC instruction via a write to ITR. This loads the first
	 * word from [R0] into DTRTX (with writeback, so R0 advances by 4). */
	retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* One word is now in flight; the remaining count-1 words are streamed
	 * in fast mode, and the final word is collected separately below. */
	count--;

	if (count > 0) {
		/* Switch to fast mode if not already in that mode. */
		retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Latch LDC instruction. */
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. Due to fast
		 * mode rules, this blocks until the instruction finishes executing and
		 * then reissues the read instruction to read the next word from
		 * memory. The last read of DTRTX in this call reads the second-to-last
		 * word from memory and issues the read instruction for the last word.
		 */
		retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
				4, count, armv7a->debug_base + CPUDBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += count * 4;
	}

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, dscr, false);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Check for faults and return early. The caller decodes the sticky
	 * abort bits left in *dscr. */
	if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
		return ERROR_OK; /* A data fault is not considered a system failure. */

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check TXfull_l). Most of the time this will be free because TXfull_l
	 * will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX into the buffer. This is the last
	 * word. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &u32);
	if (retval != ERROR_OK)
		return retval;
	target_buffer_set_u32(target, buffer, u32);

	return ERROR_OK;
}
2503
static int cortex_a_read_cpu_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through the CPU: load the address into R0, then delegate
	 * to the fast (word-aligned, streamed LDC) or slow (per-element DCC)
	 * helper. Afterwards, regardless of success, restore DCC state, decode
	 * any sticky abort into an error code, restore DFAR/DFSR clobbered by a
	 * synchronous fault, and drain the DCC. Requires a halted target. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
	}

	out:
	final_retval = retval;

	/* Cleanup path: executed on both success and failure; later errors only
	 * overwrite final_retval when no earlier error was recorded. */

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		/* NOTE(review): return value of this clear is deliberately ignored;
		 * the fault is already being reported via fault_dscr. */
		mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		/* Discard stale data from the target-to-host channel. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		/* Consume stale host-to-target data into R1 (marked dirty by the
		 * slow path; the fast path does not set this bit). */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2630
2631
2632 /*
2633 * Cortex-A Memory access
2634 *
2635 * This is same Cortex-M3 but we must also use the correct
2636 * ap number for every access.
2637 */
2638
2639 static int cortex_a_read_phys_memory(struct target *target,
2640 uint32_t address, uint32_t size,
2641 uint32_t count, uint8_t *buffer)
2642 {
2643 struct armv7a_common *armv7a = target_to_armv7a(target);
2644 struct adiv5_dap *swjdp = armv7a->arm.dap;
2645 uint8_t apsel = swjdp->apsel;
2646 int retval;
2647
2648 if (!count || !buffer)
2649 return ERROR_COMMAND_SYNTAX_ERROR;
2650
2651 LOG_DEBUG("Reading memory at real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32,
2652 address, size, count);
2653
2654 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
2655 return mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2656
2657 /* read memory through the CPU */
2658 cortex_a_prep_memaccess(target, 1);
2659 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2660 cortex_a_post_memaccess(target, 1);
2661
2662 return retval;
2663 }
2664
static int cortex_a_read_memory(struct target *target, uint32_t address,
	uint32_t size, uint32_t count, uint8_t *buffer)
{
	/* Virtual-address read through the CPU. Unaligned accesses are handled
	 * by the slow path inside cortex_a_read_cpu_memory(). */
	LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
		size, count);

	cortex_a_prep_memaccess(target, 0);
	int result = cortex_a_read_cpu_memory(target, address, size, count, buffer);
	cortex_a_post_memaccess(target, 0);

	return result;
}
2680
2681 static int cortex_a_read_memory_ahb(struct target *target, uint32_t address,
2682 uint32_t size, uint32_t count, uint8_t *buffer)
2683 {
2684 int mmu_enabled = 0;
2685 uint32_t virt, phys;
2686 int retval;
2687 struct armv7a_common *armv7a = target_to_armv7a(target);
2688 struct adiv5_dap *swjdp = armv7a->arm.dap;
2689 uint8_t apsel = swjdp->apsel;
2690
2691 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2692 return target_read_memory(target, address, size, count, buffer);
2693
2694 /* cortex_a handles unaligned memory access */
2695 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2696 size, count);
2697
2698 /* determine if MMU was enabled on target stop */
2699 if (!armv7a->is_armv7r) {
2700 retval = cortex_a_mmu(target, &mmu_enabled);
2701 if (retval != ERROR_OK)
2702 return retval;
2703 }
2704
2705 if (mmu_enabled) {
2706 virt = address;
2707 retval = cortex_a_virt2phys(target, virt, &phys);
2708 if (retval != ERROR_OK)
2709 return retval;
2710
2711 LOG_DEBUG("Reading at virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2712 virt, phys);
2713 address = phys;
2714 }
2715
2716 if (!count || !buffer)
2717 return ERROR_COMMAND_SYNTAX_ERROR;
2718
2719 retval = mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2720
2721 return retval;
2722 }
2723
2724 static int cortex_a_write_phys_memory(struct target *target,
2725 uint32_t address, uint32_t size,
2726 uint32_t count, const uint8_t *buffer)
2727 {
2728 struct armv7a_common *armv7a = target_to_armv7a(target);
2729 struct adiv5_dap *swjdp = armv7a->arm.dap;
2730 uint8_t apsel = swjdp->apsel;
2731 int retval;
2732
2733 if (!count || !buffer)
2734 return ERROR_COMMAND_SYNTAX_ERROR;
2735
2736 LOG_DEBUG("Writing memory to real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2737 size, count);
2738
2739 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
2740 return mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2741
2742 /* write memory through the CPU */
2743 cortex_a_prep_memaccess(target, 1);
2744 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2745 cortex_a_post_memaccess(target, 1);
2746
2747 return retval;
2748 }
2749
static int cortex_a_write_memory(struct target *target, uint32_t address,
	uint32_t size, uint32_t count, const uint8_t *buffer)
{
	/* Virtual-address write through the CPU. Unaligned accesses are handled
	 * by the slow path inside cortex_a_write_cpu_memory(). */
	LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
		size, count);

	/* memory writes bypass the caches, must flush before writing */
	armv7a_cache_auto_flush_on_write(target, address, size * count);

	cortex_a_prep_memaccess(target, 0);
	int result = cortex_a_write_cpu_memory(target, address, size, count, buffer);
	cortex_a_post_memaccess(target, 0);

	return result;
}
2767
2768 static int cortex_a_write_memory_ahb(struct target *target, uint32_t address,
2769 uint32_t size, uint32_t count, const uint8_t *buffer)
2770 {
2771 int mmu_enabled = 0;
2772 uint32_t virt, phys;
2773 int retval;
2774 struct armv7a_common *armv7a = target_to_armv7a(target);
2775 struct adiv5_dap *swjdp = armv7a->arm.dap;
2776 uint8_t apsel = swjdp->apsel;
2777
2778 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2779 return target_write_memory(target, address, size, count, buffer);
2780
2781 /* cortex_a handles unaligned memory access */
2782 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2783 size, count);
2784
2785 /* determine if MMU was enabled on target stop */
2786 if (!armv7a->is_armv7r) {
2787 retval = cortex_a_mmu(target, &mmu_enabled);
2788 if (retval != ERROR_OK)
2789 return retval;
2790 }
2791
2792 if (mmu_enabled) {
2793 virt = address;
2794 retval = cortex_a_virt2phys(target, virt, &phys);
2795 if (retval != ERROR_OK)
2796 return retval;
2797
2798 LOG_DEBUG("Writing to virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2799 virt,
2800 phys);
2801 address = phys;
2802 }
2803
2804 if (!count || !buffer)
2805 return ERROR_COMMAND_SYNTAX_ERROR;
2806
2807 retval = mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2808
2809 return retval;
2810 }
2811
static int cortex_a_read_buffer(struct target *target, uint32_t address,
	uint32_t count, uint8_t *buffer)
{
	/* Read "count" bytes, choosing the widest access size (up to 4 bytes)
	 * the alignment allows: a head loop fixes up misalignment with 1- and
	 * 2-byte reads, then a tail loop reads with descending sizes. */
	uint32_t size;

	/* Align up to maximum 4 bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it.
	 * (address & size) tests the size-valued alignment bit of the address:
	 * if set, one access of "size" bytes brings the address to the next
	 * 2*size boundary. */
	for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
		if (address & size) {
			int retval = cortex_a_read_memory_ahb(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Read the data with as large access size as possible. Each pass handles
	 * the portion of the remaining count divisible by the current size. */
	for (; size > 0; size /= 2) {
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = cortex_a_read_memory_ahb(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2845
static int cortex_a_write_buffer(struct target *target, uint32_t address,
	uint32_t count, const uint8_t *buffer)
{
	/* Write "count" bytes, choosing the widest access size (up to 4 bytes)
	 * the alignment allows: a head loop fixes up misalignment with 1- and
	 * 2-byte writes, then a tail loop writes with descending sizes. */
	uint32_t size;

	/* Align up to maximum 4 bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it.
	 * (address & size) tests the size-valued alignment bit of the address:
	 * if set, one access of "size" bytes brings the address to the next
	 * 2*size boundary. */
	for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
		if (address & size) {
			int retval = cortex_a_write_memory_ahb(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Write the data with as large access size as possible. Each pass handles
	 * the portion of the remaining count divisible by the current size. */
	for (; size > 0; size /= 2) {
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = cortex_a_write_memory_ahb(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2879
2880 static int cortex_a_handle_target_request(void *priv)
2881 {
2882 struct target *target = priv;
2883 struct armv7a_common *armv7a = target_to_armv7a(target);
2884 int retval;
2885
2886 if (!target_was_examined(target))
2887 return ERROR_OK;
2888 if (!target->dbg_msg_enabled)
2889 return ERROR_OK;
2890
2891 if (target->state == TARGET_RUNNING) {
2892 uint32_t request;
2893 uint32_t dscr;
2894 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2895 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2896
2897 /* check if we have data */
2898 int64_t then = timeval_ms();
2899 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2900 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2901 armv7a->debug_base + CPUDBG_DTRTX, &request);
2902 if (retval == ERROR_OK) {
2903 target_request(target, request);
2904 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2905 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2906 }
2907 if (timeval_ms() > then + 1000) {
2908 LOG_ERROR("Timeout waiting for dtr tx full");
2909 return ERROR_FAIL;
2910 }
2911 }
2912 }
2913
2914 return ERROR_OK;
2915 }
2916
2917 /*
2918 * Cortex-A target information and configuration
2919 */
2920
2921 static int cortex_a_examine_first(struct target *target)
2922 {
2923 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2924 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2925 struct adiv5_dap *swjdp = armv7a->arm.dap;
2926
2927 int i;
2928 int retval = ERROR_OK;
2929 uint32_t didr, cpuid, dbg_osreg;
2930
2931 retval = dap_dp_init(swjdp);
2932 if (retval != ERROR_OK) {
2933 LOG_ERROR("Could not initialize the debug port");
2934 return retval;
2935 }
2936
2937 /* Search for the APB-AP - it is needed for access to debug registers */
2938 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2939 if (retval != ERROR_OK) {
2940 LOG_ERROR("Could not find APB-AP for debug access");
2941 return retval;
2942 }
2943
2944 retval = mem_ap_init(armv7a->debug_ap);
2945 if (retval != ERROR_OK) {
2946 LOG_ERROR("Could not initialize the APB-AP");
2947 return retval;
2948 }
2949
2950 armv7a->debug_ap->memaccess_tck = 80;
2951
2952 /* Search for the AHB-AB.
2953 * REVISIT: We should search for AXI-AP as well and make sure the AP's MEMTYPE says it
2954 * can access system memory. */
2955 armv7a->memory_ap_available = false;
2956 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
2957 if (retval == ERROR_OK) {
2958 retval = mem_ap_init(armv7a->memory_ap);
2959 if (retval == ERROR_OK)
2960 armv7a->memory_ap_available = true;
2961 }
2962 if (retval != ERROR_OK) {
2963 /* AHB-AP not found or unavailable - use the CPU */
2964 LOG_DEBUG("No AHB-AP available for memory access");
2965 }
2966
2967 if (!target->dbgbase_set) {
2968 uint32_t dbgbase;
2969 /* Get ROM Table base */
2970 uint32_t apid;
2971 int32_t coreidx = target->coreid;
2972 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2973 target->cmd_name);
2974 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
2975 if (retval != ERROR_OK)
2976 return retval;
2977 /* Lookup 0x15 -- Processor DAP */
2978 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
2979 &armv7a->debug_base, &coreidx);
2980 if (retval != ERROR_OK) {
2981 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2982 target->cmd_name);
2983 return retval;
2984 }
2985 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2986 target->coreid, armv7a->debug_base);
2987 } else
2988 armv7a->debug_base = target->dbgbase;
2989
2990 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2991 armv7a->debug_base + CPUDBG_DIDR, &didr);
2992 if (retval != ERROR_OK) {
2993 LOG_DEBUG("Examine %s failed", "DIDR");
2994 return retval;
2995 }
2996
2997 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2998 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2999 if (retval != ERROR_OK) {
3000 LOG_DEBUG("Examine %s failed", "CPUID");
3001 return retval;
3002 }
3003
3004 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
3005 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
3006
3007 cortex_a->didr = didr;
3008 cortex_a->cpuid = cpuid;
3009
3010 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3011 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
3012 if (retval != ERROR_OK)
3013 return retval;
3014 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
3015
3016 if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
3017 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
3018 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
3019 return ERROR_TARGET_INIT_FAILED;
3020 }
3021
3022 if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
3023 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
3024
3025 /* Read DBGOSLSR and check if OSLK is implemented */
3026 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3027 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
3028 if (retval != ERROR_OK)
3029 return retval;
3030 LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
3031
3032 /* check if OS Lock is implemented */
3033 if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
3034 /* check if OS Lock is set */
3035 if (dbg_osreg & OSLSR_OSLK) {
3036 LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
3037
3038 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
3039 armv7a->debug_base + CPUDBG_OSLAR,
3040 0);
3041 if (retval == ERROR_OK)
3042 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3043 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
3044
3045 /* if we fail to access the register or cannot reset the OSLK bit, bail out */
3046 if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
3047 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
3048 target->coreid);
3049 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
3050 return ERROR_TARGET_INIT_FAILED;
3051 }
3052 }
3053 }
3054
3055 armv7a->arm.core_type = ARM_MODE_MON;
3056
3057 /* Avoid recreating the registers cache */
3058 if (!target_was_examined(target)) {
3059 retval = cortex_a_dpm_setup(cortex_a, didr);
3060 if (retval != ERROR_OK)
3061 return retval;
3062 }
3063
3064 /* Setup Breakpoint Register Pairs */
3065 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3066 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3067 cortex_a->brp_num_available = cortex_a->brp_num;
3068 free(cortex_a->brp_list);
3069 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3070 /* cortex_a->brb_enabled = ????; */
3071 for (i = 0; i < cortex_a->brp_num; i++) {
3072 cortex_a->brp_list[i].used = 0;
3073 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3074 cortex_a->brp_list[i].type = BRP_NORMAL;
3075 else
3076 cortex_a->brp_list[i].type = BRP_CONTEXT;
3077 cortex_a->brp_list[i].value = 0;
3078 cortex_a->brp_list[i].control = 0;
3079 cortex_a->brp_list[i].BRPn = i;
3080 }
3081
3082 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3083
3084 /* select debug_ap as default */
3085 swjdp->apsel = armv7a->debug_ap->ap_num;
3086
3087 target_set_examined(target);
3088 return ERROR_OK;
3089 }
3090
3091 static int cortex_a_examine(struct target *target)
3092 {
3093 int retval = ERROR_OK;
3094
3095 /* Reestablish communication after target reset */
3096 retval = cortex_a_examine_first(target);
3097
3098 /* Configure core debug access */
3099 if (retval == ERROR_OK)
3100 retval = cortex_a_init_debug_access(target);
3101
3102 return retval;
3103 }
3104
3105 /*
3106 * Cortex-A target creation and initialization
3107 */
3108
3109 static int cortex_a_init_target(struct command_context *cmd_ctx,
3110 struct target *target)
3111 {
3112 /* examine_first() does a bunch of this */
3113 arm_semihosting_init(target);
3114 return ERROR_OK;
3115 }
3116
3117 static int cortex_a_init_arch_info(struct target *target,
3118 struct cortex_a_common *cortex_a, struct jtag_tap *tap)
3119 {
3120 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3121
3122 /* Setup struct cortex_a_common */
3123 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3124
3125 /* tap has no dap initialized */
3126 if (!tap->dap) {
3127 tap->dap = dap_init();
3128
3129 /* Leave (only) generic DAP stuff for debugport_init() */
3130 tap->dap->tap = tap;
3131 }
3132
3133 armv7a->arm.dap = tap->dap;
3134
3135 cortex_a->fast_reg_read = 0;
3136
3137 /* register arch-specific functions */
3138 armv7a->examine_debug_reason = NULL;
3139
3140 armv7a->post_debug_entry = cortex_a_post_debug_entry;
3141
3142 armv7a->pre_restore_context = NULL;
3143
3144 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3145
3146
3147 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
3148
3149 /* REVISIT v7a setup should be in a v7a-specific routine */
3150 armv7a_init_arch_info(target, armv7a);
3151 target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
3152
3153 return ERROR_OK;
3154 }
3155
3156 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3157 {
3158 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3159
3160 cortex_a->armv7a_common.is_armv7r = false;
3161
3162 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3163 }
3164
3165 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3166 {
3167 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3168
3169 cortex_a->armv7a_common.is_armv7r = true;
3170
3171 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3172 }
3173
3174 static void cortex_a_deinit_target(struct target *target)
3175 {
3176 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3177 struct arm_dpm *dpm = &cortex_a->armv7a_common.dpm;
3178
3179 free(cortex_a->brp_list);
3180 free(dpm->dbp);
3181 free(dpm->dwp);
3182 free(cortex_a);
3183 }
3184
3185 static int cortex_a_mmu(struct target *target, int *enabled)
3186 {
3187 struct armv7a_common *armv7a = target_to_armv7a(target);
3188
3189 if (target->state != TARGET_HALTED) {
3190 LOG_ERROR("%s: target not halted", __func__);
3191 return ERROR_TARGET_INVALID;
3192 }
3193
3194 if (armv7a->is_armv7r)
3195 *enabled = 0;
3196 else
3197 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3198
3199 return ERROR_OK;
3200 }
3201
3202 static int cortex_a_virt2phys(struct target *target,
3203 uint32_t virt, uint32_t *phys)
3204 {
3205 int retval = ERROR_FAIL;
3206 struct armv7a_common *armv7a = target_to_armv7a(target);
3207 struct adiv5_dap *swjdp = armv7a->arm.dap;
3208 uint8_t apsel = swjdp->apsel;
3209 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num)) {
3210 uint32_t ret;
3211 retval = armv7a_mmu_translate_va(target,
3212 virt, &ret);
3213 if (retval != ERROR_OK)
3214 goto done;
3215 *phys = ret;
3216 } else {/* use this method if armv7a->memory_ap not selected
3217 * mmu must be enable in order to get a correct translation */
3218 retval = cortex_a_mmu_modify(target, 1);
3219 if (retval != ERROR_OK)
3220 goto done;
3221 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
3222 }
3223 done:
3224 return retval;
3225 }
3226
3227 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3228 {
3229 struct target *target = get_current_target(CMD_CTX);
3230 struct armv7a_common *armv7a = target_to_armv7a(target);
3231
3232 return armv7a_handle_cache_info_command(CMD_CTX,
3233 &armv7a->armv7a_mmu.armv7a_cache);
3234 }
3235
3236
3237 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3238 {
3239 struct target *target = get_current_target(CMD_CTX);
3240 if (!target_was_examined(target)) {
3241 LOG_ERROR("target not examined yet");
3242 return ERROR_FAIL;
3243 }
3244
3245 return cortex_a_init_debug_access(target);
3246 }
3247 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
3248 {
3249 struct target *target = get_current_target(CMD_CTX);
3250 /* check target is an smp target */
3251 struct target_list *head;
3252 struct target *curr;
3253 head = target->head;
3254 target->smp = 0;
3255 if (head != (struct target_list *)NULL) {
3256 while (head != (struct target_list *)NULL) {
3257 curr = head->target;
3258 curr->smp = 0;
3259 head = head->next;
3260 }
3261 /* fixes the target display to the debugger */
3262 target->gdb_service->target = target;
3263 }
3264 return ERROR_OK;
3265 }
3266
3267 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
3268 {
3269 struct target *target = get_current_target(CMD_CTX);
3270 struct target_list *head;
3271 struct target *curr;
3272 head = target->head;
3273 if (head != (struct target_list *)NULL) {
3274 target->smp = 1;
3275 while (head != (struct target_list *)NULL) {
3276 curr = head->target;
3277 curr->smp = 1;
3278 head = head->next;
3279 }
3280 }
3281 return ERROR_OK;
3282 }
3283
3284 COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
3285 {
3286 struct target *target = get_current_target(CMD_CTX);
3287 int retval = ERROR_OK;
3288 struct target_list *head;
3289 head = target->head;
3290 if (head != (struct target_list *)NULL) {
3291 if (CMD_ARGC == 1) {
3292 int coreid = 0;
3293 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
3294 if (ERROR_OK != retval)
3295 return retval;
3296 target->gdb_service->core[1] = coreid;
3297
3298 }
3299 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
3300 , target->gdb_service->core[1]);
3301 }
3302 return ERROR_OK;
3303 }
3304
3305 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3306 {
3307 struct target *target = get_current_target(CMD_CTX);
3308 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3309
3310 static const Jim_Nvp nvp_maskisr_modes[] = {
3311 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3312 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3313 { .name = NULL, .value = -1 },
3314 };
3315 const Jim_Nvp *n;
3316
3317 if (CMD_ARGC > 0) {
3318 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3319 if (n->name == NULL) {
3320 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3321 return ERROR_COMMAND_SYNTAX_ERROR;
3322 }
3323
3324 cortex_a->isrmasking_mode = n->value;
3325 }
3326
3327 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3328 command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
3329
3330 return ERROR_OK;
3331 }
3332
3333 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3334 {
3335 struct target *target = get_current_target(CMD_CTX);
3336 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3337
3338 static const Jim_Nvp nvp_dacrfixup_modes[] = {
3339 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3340 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3341 { .name = NULL, .value = -1 },
3342 };
3343 const Jim_Nvp *n;
3344
3345 if (CMD_ARGC > 0) {
3346 n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3347 if (n->name == NULL)
3348 return ERROR_COMMAND_SYNTAX_ERROR;
3349 cortex_a->dacrfixup_mode = n->value;
3350
3351 }
3352
3353 n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3354 command_print(CMD_CTX, "cortex_a domain access control fixup %s", n->name);
3355
3356 return ERROR_OK;
3357 }
3358
3359 static const struct command_registration cortex_a_exec_command_handlers[] = {
3360 {
3361 .name = "cache_info",
3362 .handler = cortex_a_handle_cache_info_command,
3363 .mode = COMMAND_EXEC,
3364 .help = "display information about target caches",
3365 .usage = "",
3366 },
3367 {
3368 .name = "dbginit",
3369 .handler = cortex_a_handle_dbginit_command,
3370 .mode = COMMAND_EXEC,
3371 .help = "Initialize core debug",
3372 .usage = "",
3373 },
3374 { .name = "smp_off",
3375 .handler = cortex_a_handle_smp_off_command,
3376 .mode = COMMAND_EXEC,
3377 .help = "Stop smp handling",
3378 .usage = "",},
3379 {
3380 .name = "smp_on",
3381 .handler = cortex_a_handle_smp_on_command,
3382 .mode = COMMAND_EXEC,
3383 .help = "Restart smp handling",
3384 .usage = "",
3385 },
3386 {
3387 .name = "smp_gdb",
3388 .handler = cortex_a_handle_smp_gdb_command,
3389 .mode = COMMAND_EXEC,
3390 .help = "display/fix current core played to gdb",
3391 .usage = "",
3392 },
3393 {
3394 .name = "maskisr",
3395 .handler = handle_cortex_a_mask_interrupts_command,
3396 .mode = COMMAND_ANY,
3397 .help = "mask cortex_a interrupts",
3398 .usage = "['on'|'off']",
3399 },
3400 {
3401 .name = "dacrfixup",
3402 .handler = handle_cortex_a_dacrfixup_command,
3403 .mode = COMMAND_EXEC,
3404 .help = "set domain access control (DACR) to all-manager "
3405 "on memory access",
3406 .usage = "['on'|'off']",
3407 },
3408
3409 COMMAND_REGISTRATION_DONE
3410 };
3411 static const struct command_registration cortex_a_command_handlers[] = {
3412 {
3413 .chain = arm_command_handlers,
3414 },
3415 {
3416 .chain = armv7a_command_handlers,
3417 },
3418 {
3419 .name = "cortex_a",
3420 .mode = COMMAND_ANY,
3421 .help = "Cortex-A command group",
3422 .usage = "",
3423 .chain = cortex_a_exec_command_handlers,
3424 },
3425 COMMAND_REGISTRATION_DONE
3426 };
3427
3428 struct target_type cortexa_target = {
3429 .name = "cortex_a",
3430 .deprecated_name = "cortex_a8",
3431
3432 .poll = cortex_a_poll,
3433 .arch_state = armv7a_arch_state,
3434
3435 .halt = cortex_a_halt,
3436 .resume = cortex_a_resume,
3437 .step = cortex_a_step,
3438
3439 .assert_reset = cortex_a_assert_reset,
3440 .deassert_reset = cortex_a_deassert_reset,
3441
3442 /* REVISIT allow exporting VFP3 registers ... */
3443 .get_gdb_reg_list = arm_get_gdb_reg_list,
3444
3445 .read_memory = cortex_a_read_memory,
3446 .write_memory = cortex_a_write_memory,
3447
3448 .read_buffer = cortex_a_read_buffer,
3449 .write_buffer = cortex_a_write_buffer,
3450
3451 .checksum_memory = arm_checksum_memory,
3452 .blank_check_memory = arm_blank_check_memory,
3453
3454 .run_algorithm = armv4_5_run_algorithm,
3455
3456 .add_breakpoint = cortex_a_add_breakpoint,
3457 .add_context_breakpoint = cortex_a_add_context_breakpoint,
3458 .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3459 .remove_breakpoint = cortex_a_remove_breakpoint,
3460 .add_watchpoint = NULL,
3461 .remove_watchpoint = NULL,
3462
3463 .commands = cortex_a_command_handlers,
3464 .target_create = cortex_a_target_create,
3465 .init_target = cortex_a_init_target,
3466 .examine = cortex_a_examine,
3467 .deinit_target = cortex_a_deinit_target,
3468
3469 .read_phys_memory = cortex_a_read_phys_memory,
3470 .write_phys_memory = cortex_a_write_phys_memory,
3471 .mmu = cortex_a_mmu,
3472 .virt2phys = cortex_a_virt2phys,
3473 };
3474
3475 static const struct command_registration cortex_r4_exec_command_handlers[] = {
3476 {
3477 .name = "cache_info",
3478 .handler = cortex_a_handle_cache_info_command,
3479 .mode = COMMAND_EXEC,
3480 .help = "display information about target caches",
3481 .usage = "",
3482 },
3483 {
3484 .name = "dbginit",
3485 .handler = cortex_a_handle_dbginit_command,
3486 .mode = COMMAND_EXEC,
3487 .help = "Initialize core debug",
3488 .usage = "",
3489 },
3490 {
3491 .name = "maskisr",
3492 .handler = handle_cortex_a_mask_interrupts_command,
3493 .mode = COMMAND_EXEC,
3494 .help = "mask cortex_r4 interrupts",
3495 .usage = "['on'|'off']",
3496 },
3497
3498 COMMAND_REGISTRATION_DONE
3499 };
3500 static const struct command_registration cortex_r4_command_handlers[] = {
3501 {
3502 .chain = arm_command_handlers,
3503 },
3504 {
3505 .chain = armv7a_command_handlers,
3506 },
3507 {
3508 .name = "cortex_r4",
3509 .mode = COMMAND_ANY,
3510 .help = "Cortex-R4 command group",
3511 .usage = "",
3512 .chain = cortex_r4_exec_command_handlers,
3513 },
3514 COMMAND_REGISTRATION_DONE
3515 };
3516
3517 struct target_type cortexr4_target = {
3518 .name = "cortex_r4",
3519
3520 .poll = cortex_a_poll,
3521 .arch_state = armv7a_arch_state,
3522
3523 .halt = cortex_a_halt,
3524 .resume = cortex_a_resume,
3525 .step = cortex_a_step,
3526
3527 .assert_reset = cortex_a_assert_reset,
3528 .deassert_reset = cortex_a_deassert_reset,
3529
3530 /* REVISIT allow exporting VFP3 registers ... */
3531 .get_gdb_reg_list = arm_get_gdb_reg_list,
3532
3533 .read_memory = cortex_a_read_phys_memory,
3534 .write_memory = cortex_a_write_phys_memory,
3535
3536 .checksum_memory = arm_checksum_memory,
3537 .blank_check_memory = arm_blank_check_memory,
3538
3539 .run_algorithm = armv4_5_run_algorithm,
3540
3541 .add_breakpoint = cortex_a_add_breakpoint,
3542 .add_context_breakpoint = cortex_a_add_context_breakpoint,
3543 .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3544 .remove_breakpoint = cortex_a_remove_breakpoint,
3545 .add_watchpoint = NULL,
3546 .remove_watchpoint = NULL,
3547
3548 .commands = cortex_r4_command_handlers,
3549 .target_create = cortex_r4_target_create,
3550 .init_target = cortex_a_init_target,
3551 .examine = cortex_a_examine,
3552 .deinit_target = cortex_a_deinit_target,
3553 };

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)