cortex: Set default memaccess_tck only during examine
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program; if not, write to the *
38 * Free Software Foundation, Inc., *
39 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
40 * *
41 * Cortex-A8(tm) TRM, ARM DDI 0344H *
42 * Cortex-A9(tm) TRM, ARM DDI 0407F *
43 * Cortex-A4(tm) TRM, ARM DDI 0363E *
44 * Cortex-A15(tm)TRM, ARM DDI 0438C *
45 * *
46 ***************************************************************************/
47
48 #ifdef HAVE_CONFIG_H
49 #include "config.h"
50 #endif
51
52 #include "breakpoints.h"
53 #include "cortex_a.h"
54 #include "register.h"
55 #include "target_request.h"
56 #include "target_type.h"
57 #include "arm_opcodes.h"
58 #include <helper/time_support.h>
59
60 static int cortex_a_poll(struct target *target);
61 static int cortex_a_debug_entry(struct target *target);
62 static int cortex_a_restore_context(struct target *target, bool bpwp);
63 static int cortex_a_set_breakpoint(struct target *target,
64 struct breakpoint *breakpoint, uint8_t matchmode);
65 static int cortex_a_set_context_breakpoint(struct target *target,
66 struct breakpoint *breakpoint, uint8_t matchmode);
67 static int cortex_a_set_hybrid_breakpoint(struct target *target,
68 struct breakpoint *breakpoint);
69 static int cortex_a_unset_breakpoint(struct target *target,
70 struct breakpoint *breakpoint);
71 static int cortex_a_dap_read_coreregister_u32(struct target *target,
72 uint32_t *value, int regnum);
73 static int cortex_a_dap_write_coreregister_u32(struct target *target,
74 uint32_t value, int regnum);
75 static int cortex_a_mmu(struct target *target, int *enabled);
76 static int cortex_a_mmu_modify(struct target *target, int enable);
77 static int cortex_a_virt2phys(struct target *target,
78 uint32_t virt, uint32_t *phys);
79 static int cortex_a_read_apb_ab_memory(struct target *target,
80 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
81
82
83 /* restore cp15_control_reg at resume */
84 static int cortex_a_restore_cp15_control_reg(struct target *target)
85 {
86 int retval = ERROR_OK;
87 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
88 struct armv7a_common *armv7a = target_to_armv7a(target);
89
90 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
91 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
92 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
93 retval = armv7a->arm.mcr(target, 15,
94 0, 0, /* op1, op2 */
95 1, 0, /* CRn, CRm */
96 cortex_a->cp15_control_reg);
97 }
98 return retval;
99 }
100
101 /*
102 * Set up ARM core for memory access.
103 * If !phys_access, switch to SVC mode and make sure MMU is on
104 * If phys_access, switch off mmu
105 */
106 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
107 {
108 struct armv7a_common *armv7a = target_to_armv7a(target);
109 int mmu_enabled = 0;
110
111 if (phys_access == 0) {
112 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
113 cortex_a_mmu(target, &mmu_enabled);
114 if (mmu_enabled)
115 cortex_a_mmu_modify(target, 1);
116 } else {
117 cortex_a_mmu(target, &mmu_enabled);
118 if (mmu_enabled)
119 cortex_a_mmu_modify(target, 0);
120 }
121 return ERROR_OK;
122 }
123
124 /*
125 * Restore ARM core after memory access.
126 * If !phys_access, switch to previous mode
127 * If phys_access, restore MMU setting
128 */
129 static int cortex_a_post_memaccess(struct target *target, int phys_access)
130 {
131 struct armv7a_common *armv7a = target_to_armv7a(target);
132
133 if (phys_access == 0) {
134 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
135 } else {
136 int mmu_enabled = 0;
137 cortex_a_mmu(target, &mmu_enabled);
138 if (mmu_enabled)
139 cortex_a_mmu_modify(target, 1);
140 }
141 return ERROR_OK;
142 }
143
144
145 /* modify cp15_control_reg in order to enable or disable mmu for :
146 * - virt2phys address conversion
147 * - read or write memory in phys or virt address */
148 static int cortex_a_mmu_modify(struct target *target, int enable)
149 {
150 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
151 struct armv7a_common *armv7a = target_to_armv7a(target);
152 int retval = ERROR_OK;
153 int need_write = 0;
154
155 if (enable) {
156 /* if mmu enabled at target stop and mmu not enable */
157 if (!(cortex_a->cp15_control_reg & 0x1U)) {
158 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
159 return ERROR_FAIL;
160 }
161 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
162 cortex_a->cp15_control_reg_curr |= 0x1U;
163 need_write = 1;
164 }
165 } else {
166 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
167 cortex_a->cp15_control_reg_curr &= ~0x1U;
168 need_write = 1;
169 }
170 }
171
172 if (need_write) {
173 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
174 enable ? "enable mmu" : "disable mmu",
175 cortex_a->cp15_control_reg_curr);
176
177 retval = armv7a->arm.mcr(target, 15,
178 0, 0, /* op1, op2 */
179 1, 0, /* CRn, CRm */
180 cortex_a->cp15_control_reg_curr);
181 }
182 return retval;
183 }
184
185 /*
186 * Cortex-A Basic debug access, very low level assumes state is saved
187 */
188 static int cortex_a8_init_debug_access(struct target *target)
189 {
190 struct armv7a_common *armv7a = target_to_armv7a(target);
191 int retval;
192
193 LOG_DEBUG(" ");
194
195 /* Unlocking the debug registers for modification
196 * The debugport might be uninitialised so try twice */
197 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
198 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
199 if (retval != ERROR_OK) {
200 /* try again */
201 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
202 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
203 if (retval == ERROR_OK)
204 LOG_USER(
205 "Locking debug access failed on first, but succeeded on second try.");
206 }
207
208 return retval;
209 }
210
211 /*
212 * Cortex-A Basic debug access, very low level assumes state is saved
213 */
/*
 * Cortex-A Basic debug access, very low level assumes state is saved.
 *
 * Unlocks the debug registers (OS-lock flavour for A7/A15, lock-access
 * key flavour for the others), clears the sticky power-down status, and
 * disables cache/TLB maintenance side effects while in debug state.
 * Finishes with a poll so target state is refreshed after init/reset.
 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;
	uint32_t dbg_osreg;
	uint32_t cortex_part_num;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	LOG_DEBUG(" ");
	/* extract the part number field from the cached MIDR value */
	cortex_part_num = (cortex_a->cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >>
		CORTEX_A_MIDR_PARTNUM_SHIFT;

	switch (cortex_part_num) {
	case CORTEX_A7_PARTNUM:
	case CORTEX_A15_PARTNUM:
		/* A7/A15 use the OS Lock: read DBGOSLSR and clear the lock
		 * via DBGOSLAR if it is set */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_OSLSR,
				&dbg_osreg);
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("DBGOSLSR 0x%" PRIx32, dbg_osreg);

		if (dbg_osreg & CPUDBG_OSLAR_LK_MASK)
			/* Unlocking the DEBUG OS registers for modification */
			retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
					armv7a->debug_base + CPUDBG_OSLAR,
					0);
		break;

	case CORTEX_A5_PARTNUM:
	case CORTEX_A8_PARTNUM:
	case CORTEX_A9_PARTNUM:
	default:
		/* older parts use the 0xC5ACCE55 lock-access key instead */
		retval = cortex_a8_init_debug_access(target);
	}

	if (retval != ERROR_OK)
		return retval;
	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
	/* NOTE(review): target->coreid is printed with PRId32 — confirm the
	 * field really is 32 bits wide on all builds */
	LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);

	if (retval != ERROR_OK)
		return retval;

	/* Disable cacheline fills and force cache write-through in debug state */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable TLB lookup and refill/eviction in debug state */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSMCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a_poll(target);
}
281
static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
{
	/* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
	 * Writes final value of DSCR into *dscr. Pass force to force always
	 * reading DSCR at least once.
	 *
	 * NOTE(review): *dscr is dereferenced before any read happens, so the
	 * caller MUST pass a pointer to initialized storage — passing NULL
	 * crashes, and stale contents may skip the read unless force is set. */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	long long then = timeval_ms();
	while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
		force = false;	/* guarantee at most one forced extra read */
		int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		/* 1 second timeout on the busy-wait */
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for InstrCompl=1");
			return ERROR_FAIL;
		}
	}
	return ERROR_OK;
}
304
305 /* To reduce needless round-trips, pass in a pointer to the current
306 * DSCR value. Initialize it to zero if you just need to know the
307 * value on return from this function; or DSCR_INSTR_COMP if you
308 * happen to know that no instruction is pending.
309 */
310 static int cortex_a_exec_opcode(struct target *target,
311 uint32_t opcode, uint32_t *dscr_p)
312 {
313 uint32_t dscr;
314 int retval;
315 struct armv7a_common *armv7a = target_to_armv7a(target);
316
317 dscr = dscr_p ? *dscr_p : 0;
318
319 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
320
321 /* Wait for InstrCompl bit to be set */
322 retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
323 if (retval != ERROR_OK)
324 return retval;
325
326 retval = mem_ap_write_u32(armv7a->debug_ap,
327 armv7a->debug_base + CPUDBG_ITR, opcode);
328 if (retval != ERROR_OK)
329 return retval;
330
331 long long then = timeval_ms();
332 do {
333 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
334 armv7a->debug_base + CPUDBG_DSCR, &dscr);
335 if (retval != ERROR_OK) {
336 LOG_ERROR("Could not read DSCR register");
337 return retval;
338 }
339 if (timeval_ms() > then + 1000) {
340 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
341 return ERROR_FAIL;
342 }
343 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
344
345 if (dscr_p)
346 *dscr_p = dscr;
347
348 return retval;
349 }
350
351 /**************************************************************************
352 Read core register with very few exec_opcode, fast but needs work_area.
353 This can cause problems with MMU active.
354 **************************************************************************/
static int cortex_a_read_regs_through_mem(struct target *target, uint32_t address,
	uint32_t *regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* save current R0 into regfile[0] ... */
	retval = cortex_a_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	/* ... then point R0 at the work-area scratch buffer */
	retval = cortex_a_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	/* STMIA r0, {r1-r15} (mask 0xFFFE): dump R1..R15 to memory in one go.
	 * NOTE: passes NULL for dscr_p to cortex_a_exec_opcode(). */
	retval = cortex_a_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;

	/* read the 15 stored words back through the memory AP into
	 * regfile[1..15]; with the MMU active `address` must be a valid
	 * virtual address for both views */
	retval = mem_ap_read_buf(armv7a->memory_ap,
			(uint8_t *)(&regfile[1]), 4, 15, address);

	return retval;
}
376
/*
 * Read one core register through the DCC channel.
 * regnum 0..14 = R0..R14, 15 = PC, 16/17 = CPSR/SPSR.
 * NOTE(review): regnum > 17 silently returns ERROR_OK without touching
 * *value — callers get an unchanged buffer, not an error.
 */
static int cortex_a_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	if (reg > 17)
		return retval;

	if (reg < 15) {
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else if (reg == 15) {
		/* "MOV r0, r15"; then move r0 to DCCTX */
		retval = cortex_a_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* "MRS r0, CPSR" or "MRS r0, SPSR"
		 * then move r0 to DCCTX
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX (1 second timeout) */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	}

	/* the transferred word is now waiting in DTRTX */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
438
/*
 * Write one core register through the DCC channel.
 * regnum 0..14 = R0..R14, 15 = PC, 16/17 = CPSR/SPSR.
 * NOTE(review): Rd > 17 silently returns ERROR_OK, and that check runs
 * only after the DCCRX-full drain below — verify this ordering is
 * intentional.
 */
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15) {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	} else if (Rd == 15) {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16) {
			retval = cortex_a_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
516
517 /* Write to memory mapped registers directly with no cache or mmu handling */
518 static int cortex_a_dap_write_memap_register_u32(struct target *target,
519 uint32_t address,
520 uint32_t value)
521 {
522 int retval;
523 struct armv7a_common *armv7a = target_to_armv7a(target);
524
525 retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
526
527 return retval;
528 }
529
530 /*
531 * Cortex-A implementation of Debug Programmer's Model
532 *
533 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
534 * so there's no need to poll for it before executing an instruction.
535 *
536 * NOTE that in several of these cases the "stall" mode might be useful.
537 * It'd let us queue a few operations together... prepare/finish might
538 * be the places to enable/disable that mode.
539 */
540
/* Map a generic DPM pointer back to its containing cortex_a_common. */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
545
546 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
547 {
548 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
549 return mem_ap_write_u32(a->armv7a_common.debug_ap,
550 a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
551 }
552
/*
 * Pull one word out of the DCC (DTRTX), waiting for DTRTXfull first.
 * dscr_p, if non-NULL, carries the cached DSCR value in and the final
 * DSCR value out, saving redundant reads.
 */
static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull (1 second timeout) */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	/* transfer the word out of DTRTX */
	retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
587
/*
 * DPM prepare hook: establish the invariant that DSCR.InstrCompl is set
 * (no instruction pending) and DCCRX is empty before a DPM transaction.
 */
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after ever DPM operation */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		/* 1 second timeout on the busy-wait */
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by draining it into R0 (MRC p14) */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
624
625 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
626 {
627 /* REVISIT what could be done here? */
628 return ERROR_OK;
629 }
630
631 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
632 uint32_t opcode, uint32_t data)
633 {
634 struct cortex_a_common *a = dpm_to_a(dpm);
635 int retval;
636 uint32_t dscr = DSCR_INSTR_COMP;
637
638 retval = cortex_a_write_dcc(a, data);
639 if (retval != ERROR_OK)
640 return retval;
641
642 return cortex_a_exec_opcode(
643 a->armv7a_common.arm.target,
644 opcode,
645 &dscr);
646 }
647
648 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
649 uint32_t opcode, uint32_t data)
650 {
651 struct cortex_a_common *a = dpm_to_a(dpm);
652 uint32_t dscr = DSCR_INSTR_COMP;
653 int retval;
654
655 retval = cortex_a_write_dcc(a, data);
656 if (retval != ERROR_OK)
657 return retval;
658
659 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
660 retval = cortex_a_exec_opcode(
661 a->armv7a_common.arm.target,
662 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
663 &dscr);
664 if (retval != ERROR_OK)
665 return retval;
666
667 /* then the opcode, taking data from R0 */
668 retval = cortex_a_exec_opcode(
669 a->armv7a_common.arm.target,
670 opcode,
671 &dscr);
672
673 return retval;
674 }
675
676 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
677 {
678 struct target *target = dpm->arm->target;
679 uint32_t dscr = DSCR_INSTR_COMP;
680
681 /* "Prefetch flush" after modifying execution status in CPSR */
682 return cortex_a_exec_opcode(target,
683 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
684 &dscr);
685 }
686
687 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
688 uint32_t opcode, uint32_t *data)
689 {
690 struct cortex_a_common *a = dpm_to_a(dpm);
691 int retval;
692 uint32_t dscr = DSCR_INSTR_COMP;
693
694 /* the opcode, writing data to DCC */
695 retval = cortex_a_exec_opcode(
696 a->armv7a_common.arm.target,
697 opcode,
698 &dscr);
699 if (retval != ERROR_OK)
700 return retval;
701
702 return cortex_a_read_dcc(a, data, &dscr);
703 }
704
705
706 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
707 uint32_t opcode, uint32_t *data)
708 {
709 struct cortex_a_common *a = dpm_to_a(dpm);
710 uint32_t dscr = DSCR_INSTR_COMP;
711 int retval;
712
713 /* the opcode, writing data to R0 */
714 retval = cortex_a_exec_opcode(
715 a->armv7a_common.arm.target,
716 opcode,
717 &dscr);
718 if (retval != ERROR_OK)
719 return retval;
720
721 /* write R0 to DCC */
722 retval = cortex_a_exec_opcode(
723 a->armv7a_common.arm.target,
724 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
725 &dscr);
726 if (retval != ERROR_OK)
727 return retval;
728
729 return cortex_a_read_dcc(a, data, &dscr);
730 }
731
732 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
733 uint32_t addr, uint32_t control)
734 {
735 struct cortex_a_common *a = dpm_to_a(dpm);
736 uint32_t vr = a->armv7a_common.debug_base;
737 uint32_t cr = a->armv7a_common.debug_base;
738 int retval;
739
740 switch (index_t) {
741 case 0 ... 15: /* breakpoints */
742 vr += CPUDBG_BVR_BASE;
743 cr += CPUDBG_BCR_BASE;
744 break;
745 case 16 ... 31: /* watchpoints */
746 vr += CPUDBG_WVR_BASE;
747 cr += CPUDBG_WCR_BASE;
748 index_t -= 16;
749 break;
750 default:
751 return ERROR_FAIL;
752 }
753 vr += 4 * index_t;
754 cr += 4 * index_t;
755
756 LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
757 (unsigned) vr, (unsigned) cr);
758
759 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
760 vr, addr);
761 if (retval != ERROR_OK)
762 return retval;
763 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
764 cr, control);
765 return retval;
766 }
767
768 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
769 {
770 struct cortex_a_common *a = dpm_to_a(dpm);
771 uint32_t cr;
772
773 switch (index_t) {
774 case 0 ... 15:
775 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
776 break;
777 case 16 ... 31:
778 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
779 index_t -= 16;
780 break;
781 default:
782 return ERROR_FAIL;
783 }
784 cr += 4 * index_t;
785
786 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
787
788 /* clear control register */
789 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
790 }
791
792 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
793 {
794 struct arm_dpm *dpm = &a->armv7a_common.dpm;
795 int retval;
796
797 dpm->arm = &a->armv7a_common.arm;
798 dpm->didr = didr;
799
800 dpm->prepare = cortex_a_dpm_prepare;
801 dpm->finish = cortex_a_dpm_finish;
802
803 dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
804 dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
805 dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
806
807 dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
808 dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
809
810 dpm->bpwp_enable = cortex_a_bpwp_enable;
811 dpm->bpwp_disable = cortex_a_bpwp_disable;
812
813 retval = arm_dpm_setup(dpm);
814 if (retval == ERROR_OK)
815 retval = arm_dpm_initialize(dpm);
816
817 return retval;
818 }
819 static struct target *get_cortex_a(struct target *target, int32_t coreid)
820 {
821 struct target_list *head;
822 struct target *curr;
823
824 head = target->head;
825 while (head != (struct target_list *)NULL) {
826 curr = head->target;
827 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
828 return curr;
829 head = head->next;
830 }
831 return target;
832 }
833 static int cortex_a_halt(struct target *target);
834
835 static int cortex_a_halt_smp(struct target *target)
836 {
837 int retval = 0;
838 struct target_list *head;
839 struct target *curr;
840 head = target->head;
841 while (head != (struct target_list *)NULL) {
842 curr = head->target;
843 if ((curr != target) && (curr->state != TARGET_HALTED))
844 retval += cortex_a_halt(curr);
845 head = head->next;
846 }
847 return retval;
848 }
849
850 static int update_halt_gdb(struct target *target)
851 {
852 int retval = 0;
853 if (target->gdb_service && target->gdb_service->core[0] == -1) {
854 target->gdb_service->target = target;
855 target->gdb_service->core[0] = target->coreid;
856 retval += cortex_a_halt_smp(target);
857 }
858 return retval;
859 }
860
861 /*
862 * Cortex-A Run control
863 */
864
/*
 * Poll the core's DSCR and update target->state, running debug entry and
 * firing halt events on a running->halted transition.  Also implements
 * the SMP gdb core-switch handshake (see inline comments).
 */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	/* read DSCR and cache it for later inspection */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				/* normal halt: enter debug state, halt SMP
				 * siblings, notify gdb */
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				/* halt out of debug-run: same entry path but a
				 * DEBUG_HALTED event */
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
934
/*
 * Request a halt via DRCR, enable halting debug mode in DSCR, then spin
 * (1 s timeout) until DSCR reports the core halted.
 */
static int cortex_a_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enter halting debug mode
	 */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* read-modify-write DSCR to set the halting-debug-mode bit */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	/* poll DSCR until the core reports halted (1 second timeout) */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_HALTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
981
/*
 * Prepare the core for resume: compute the resume PC (honoring
 * ARM/Thumb state), restore CP15 control and the register context, and
 * mark the target running.  Does not actually restart the core — that is
 * cortex_a_internal_restart()'s job.
 *
 * current = 1: continue at the current PC; otherwise continue at
 * *address.  On return *address holds the PC actually used.
 */
static int cortex_a_internal_restore(struct target *target, int current,
	uint32_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM state: PC must be word aligned */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
1075
/*
 * Restart the core after debug state: disable the ITR, clear sticky
 * exception flags, issue a restart request, and wait (up to 1 s) for
 * the core to report it has actually restarted.  See ARMv7 ARM, C5.9.
 * On success the target state is marked TARGET_RUNNING and the cached
 * registers are invalidated.
 *
 * REVISIT: for single stepping, we probably want to
 * disable IRQs by default, with optional override...
 */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t dscr;

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* last ITR-issued instruction must have completed before restart */
	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* clear ITRen so no further instructions are fed via the ITR */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	/* request restart and clear sticky exception flags in one DRCR write */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* poll DSCR until the core acknowledges the restart */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1131
1132 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
1133 {
1134 int retval = 0;
1135 struct target_list *head;
1136 struct target *curr;
1137 uint32_t address;
1138 head = target->head;
1139 while (head != (struct target_list *)NULL) {
1140 curr = head->target;
1141 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1142 /* resume current address , not in step mode */
1143 retval += cortex_a_internal_restore(curr, 1, &address,
1144 handle_breakpoints, 0);
1145 retval += cortex_a_internal_restart(curr);
1146 }
1147 head = head->next;
1148
1149 }
1150 return retval;
1151 }
1152
1153 static int cortex_a_resume(struct target *target, int current,
1154 uint32_t address, int handle_breakpoints, int debug_execution)
1155 {
1156 int retval = 0;
1157 /* dummy resume for smp toggle in order to reduce gdb impact */
1158 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1159 /* simulate a start and halt of target */
1160 target->gdb_service->target = NULL;
1161 target->gdb_service->core[0] = target->gdb_service->core[1];
1162 /* fake resume at next poll we play the target core[1], see poll*/
1163 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1164 return 0;
1165 }
1166 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1167 if (target->smp) {
1168 target->gdb_service->core[0] = -1;
1169 retval = cortex_a_restore_smp(target, handle_breakpoints);
1170 if (retval != ERROR_OK)
1171 return retval;
1172 }
1173 cortex_a_internal_restart(target);
1174
1175 if (!debug_execution) {
1176 target->state = TARGET_RUNNING;
1177 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1178 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1179 } else {
1180 target->state = TARGET_DEBUG_RUNNING;
1181 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1182 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1183 }
1184
1185 return ERROR_OK;
1186 }
1187
/*
 * Handle entry into debug state: re-enable the ITR so instructions can
 * be fed to the core, determine why the core halted, and refresh
 * OpenOCD's register cache from the hardware.  Finally invokes the
 * architecture post_debug_entry hook, if installed.
 */
static int cortex_a_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason: derive target->debug_reason from the halt
	 * cause latched in DSCR at halt time */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load registers accessible through the core debug port */
	if (!regfile_working_area)
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	else {
		/* fast path: dump r0-r15 to the working area in one burst */
		retval = cortex_a_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
			return retval;

		/* read Current PSR */
		retval = cortex_a_dap_read_coreregister_u32(target, &cpsr, 16);
		/* store current cpsr */
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(arm, cpsr);

		/* update cache: mark r0..pc valid and clean */
		for (i = 0; i <= ARM_PC; i++) {
			reg = arm_reg_current(arm, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: compensate for the 8-byte (ARM) /
		 * 4-byte (Thumb/ThumbEE) offset in the PC value read back */
		if (cpsr & (1 << 5)) {
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		} else {
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = arm->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1307
1308 static int cortex_a_post_debug_entry(struct target *target)
1309 {
1310 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1311 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1312 int retval;
1313
1314 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1315 retval = armv7a->arm.mrc(target, 15,
1316 0, 0, /* op1, op2 */
1317 1, 0, /* CRn, CRm */
1318 &cortex_a->cp15_control_reg);
1319 if (retval != ERROR_OK)
1320 return retval;
1321 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1322 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1323
1324 if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1325 armv7a_identify_cache(target);
1326
1327 if (armv7a->is_armv7r) {
1328 armv7a->armv7a_mmu.mmu_enabled = 0;
1329 } else {
1330 armv7a->armv7a_mmu.mmu_enabled =
1331 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1332 }
1333 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1334 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1335 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1336 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1337 cortex_a->curr_mode = armv7a->arm.core_mode;
1338
1339 return ERROR_OK;
1340 }
1341
1342 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1343 {
1344 struct armv7a_common *armv7a = target_to_armv7a(target);
1345 uint32_t dscr;
1346
1347 /* Read DSCR */
1348 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1349 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1350 if (ERROR_OK != retval)
1351 return retval;
1352
1353 /* clear bitfield */
1354 dscr &= ~bit_mask;
1355 /* put new value */
1356 dscr |= value & bit_mask;
1357
1358 /* write new DSCR */
1359 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1360 armv7a->debug_base + CPUDBG_DSCR, dscr);
1361 return retval;
1362 }
1363
/*
 * Single-step one instruction by planting a hardware "IVA mismatch"
 * breakpoint on the current (or requested) PC and resuming: the core
 * halts again at the first instruction fetched from a different address.
 * current = 1: step from the current PC; otherwise step from address.
 */
static int cortex_a_step(struct target *target, int current, uint32_t address,
	int handle_breakpoints)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct breakpoint *breakpoint = NULL;
	struct breakpoint stepbreakpoint;
	struct reg *r;
	int retval;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = arm->pc;
	if (!current)
		buf_set_u32(r->value, 0, 32, address);
	else
		address = buf_get_u32(r->value, 0, 32);

	/* The front-end may request us not to handle breakpoints.
	 * But since Cortex-A uses breakpoint for single step,
	 * we MUST handle breakpoints.
	 */
	handle_breakpoints = 1;
	if (handle_breakpoints) {
		/* temporarily remove a user breakpoint at the step address so
		 * it cannot retrigger immediately */
		breakpoint = breakpoint_find(target, address);
		if (breakpoint)
			cortex_a_unset_breakpoint(target, breakpoint);
	}

	/* Setup single step breakpoint */
	stepbreakpoint.address = address;
	stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
		? 2 : 4;
	stepbreakpoint.type = BKPT_HARD;
	stepbreakpoint.set = 0;

	/* Disable interrupts during single step if requested */
	if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
		retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
		if (ERROR_OK != retval)
			return retval;
	}

	/* Break on IVA mismatch (matchmode 0x04) */
	cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	retval = cortex_a_resume(target, 1, address, 0, 0);
	if (retval != ERROR_OK)
		return retval;

	/* poll until the core halts again on the mismatch breakpoint */
	long long then = timeval_ms();
	while (target->state != TARGET_HALTED) {
		retval = cortex_a_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("timeout waiting for target halt");
			return ERROR_FAIL;
		}
	}

	cortex_a_unset_breakpoint(target, &stepbreakpoint);

	/* Re-enable interrupts if they were disabled */
	if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
		retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
		if (ERROR_OK != retval)
			return retval;
	}


	target->debug_reason = DBG_REASON_BREAKPOINT;

	/* re-arm the user breakpoint removed above (exact match mode) */
	if (breakpoint)
		cortex_a_set_breakpoint(target, breakpoint, 0);

	/* NOTE(review): this condition looks inverted -- "target stepped" is
	 * only logged when the target is NOT halted, yet the poll loop above
	 * only exits once it IS halted; verify intent. */
	if (target->state != TARGET_HALTED)
		LOG_DEBUG("target stepped");

	return ERROR_OK;
}
1452
1453 static int cortex_a_restore_context(struct target *target, bool bpwp)
1454 {
1455 struct armv7a_common *armv7a = target_to_armv7a(target);
1456
1457 LOG_DEBUG(" ");
1458
1459 if (armv7a->pre_restore_context)
1460 armv7a->pre_restore_context(target);
1461
1462 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1463 }
1464
1465 /*
1466 * Cortex-A Breakpoint and watchpoint functions
1467 */
1468
1469 /* Setup hardware Breakpoint Register Pair */
1470 static int cortex_a_set_breakpoint(struct target *target,
1471 struct breakpoint *breakpoint, uint8_t matchmode)
1472 {
1473 int retval;
1474 int brp_i = 0;
1475 uint32_t control;
1476 uint8_t byte_addr_select = 0x0F;
1477 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1478 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1479 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1480
1481 if (breakpoint->set) {
1482 LOG_WARNING("breakpoint already set");
1483 return ERROR_OK;
1484 }
1485
1486 if (breakpoint->type == BKPT_HARD) {
1487 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1488 brp_i++;
1489 if (brp_i >= cortex_a->brp_num) {
1490 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1491 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1492 }
1493 breakpoint->set = brp_i + 1;
1494 if (breakpoint->length == 2)
1495 byte_addr_select = (3 << (breakpoint->address & 0x02));
1496 control = ((matchmode & 0x7) << 20)
1497 | (byte_addr_select << 5)
1498 | (3 << 1) | 1;
1499 brp_list[brp_i].used = 1;
1500 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1501 brp_list[brp_i].control = control;
1502 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1503 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1504 brp_list[brp_i].value);
1505 if (retval != ERROR_OK)
1506 return retval;
1507 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1508 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1509 brp_list[brp_i].control);
1510 if (retval != ERROR_OK)
1511 return retval;
1512 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1513 brp_list[brp_i].control,
1514 brp_list[brp_i].value);
1515 } else if (breakpoint->type == BKPT_SOFT) {
1516 uint8_t code[4];
1517 if (breakpoint->length == 2)
1518 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1519 else
1520 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1521 retval = target_read_memory(target,
1522 breakpoint->address & 0xFFFFFFFE,
1523 breakpoint->length, 1,
1524 breakpoint->orig_instr);
1525 if (retval != ERROR_OK)
1526 return retval;
1527
1528 /* make sure data cache is cleaned & invalidated down to PoC */
1529 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1530 armv7a_cache_flush_virt(target, breakpoint->address,
1531 breakpoint->length);
1532 }
1533
1534 retval = target_write_memory(target,
1535 breakpoint->address & 0xFFFFFFFE,
1536 breakpoint->length, 1, code);
1537 if (retval != ERROR_OK)
1538 return retval;
1539
1540 /* update i-cache at breakpoint location */
1541 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1542 breakpoint->length);
1543 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1544 breakpoint->length);
1545
1546 breakpoint->set = 0x11; /* Any nice value but 0 */
1547 }
1548
1549 return ERROR_OK;
1550 }
1551
1552 static int cortex_a_set_context_breakpoint(struct target *target,
1553 struct breakpoint *breakpoint, uint8_t matchmode)
1554 {
1555 int retval = ERROR_FAIL;
1556 int brp_i = 0;
1557 uint32_t control;
1558 uint8_t byte_addr_select = 0x0F;
1559 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1560 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1561 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1562
1563 if (breakpoint->set) {
1564 LOG_WARNING("breakpoint already set");
1565 return retval;
1566 }
1567 /*check available context BRPs*/
1568 while ((brp_list[brp_i].used ||
1569 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1570 brp_i++;
1571
1572 if (brp_i >= cortex_a->brp_num) {
1573 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1574 return ERROR_FAIL;
1575 }
1576
1577 breakpoint->set = brp_i + 1;
1578 control = ((matchmode & 0x7) << 20)
1579 | (byte_addr_select << 5)
1580 | (3 << 1) | 1;
1581 brp_list[brp_i].used = 1;
1582 brp_list[brp_i].value = (breakpoint->asid);
1583 brp_list[brp_i].control = control;
1584 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1585 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1586 brp_list[brp_i].value);
1587 if (retval != ERROR_OK)
1588 return retval;
1589 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1590 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1591 brp_list[brp_i].control);
1592 if (retval != ERROR_OK)
1593 return retval;
1594 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1595 brp_list[brp_i].control,
1596 brp_list[brp_i].value);
1597 return ERROR_OK;
1598
1599 }
1600
1601 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1602 {
1603 int retval = ERROR_FAIL;
1604 int brp_1 = 0; /* holds the contextID pair */
1605 int brp_2 = 0; /* holds the IVA pair */
1606 uint32_t control_CTX, control_IVA;
1607 uint8_t CTX_byte_addr_select = 0x0F;
1608 uint8_t IVA_byte_addr_select = 0x0F;
1609 uint8_t CTX_machmode = 0x03;
1610 uint8_t IVA_machmode = 0x01;
1611 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1612 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1613 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1614
1615 if (breakpoint->set) {
1616 LOG_WARNING("breakpoint already set");
1617 return retval;
1618 }
1619 /*check available context BRPs*/
1620 while ((brp_list[brp_1].used ||
1621 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1622 brp_1++;
1623
1624 printf("brp(CTX) found num: %d\n", brp_1);
1625 if (brp_1 >= cortex_a->brp_num) {
1626 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1627 return ERROR_FAIL;
1628 }
1629
1630 while ((brp_list[brp_2].used ||
1631 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1632 brp_2++;
1633
1634 printf("brp(IVA) found num: %d\n", brp_2);
1635 if (brp_2 >= cortex_a->brp_num) {
1636 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1637 return ERROR_FAIL;
1638 }
1639
1640 breakpoint->set = brp_1 + 1;
1641 breakpoint->linked_BRP = brp_2;
1642 control_CTX = ((CTX_machmode & 0x7) << 20)
1643 | (brp_2 << 16)
1644 | (0 << 14)
1645 | (CTX_byte_addr_select << 5)
1646 | (3 << 1) | 1;
1647 brp_list[brp_1].used = 1;
1648 brp_list[brp_1].value = (breakpoint->asid);
1649 brp_list[brp_1].control = control_CTX;
1650 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1651 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1652 brp_list[brp_1].value);
1653 if (retval != ERROR_OK)
1654 return retval;
1655 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1656 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1657 brp_list[brp_1].control);
1658 if (retval != ERROR_OK)
1659 return retval;
1660
1661 control_IVA = ((IVA_machmode & 0x7) << 20)
1662 | (brp_1 << 16)
1663 | (IVA_byte_addr_select << 5)
1664 | (3 << 1) | 1;
1665 brp_list[brp_2].used = 1;
1666 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1667 brp_list[brp_2].control = control_IVA;
1668 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1669 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1670 brp_list[brp_2].value);
1671 if (retval != ERROR_OK)
1672 return retval;
1673 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1674 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1675 brp_list[brp_2].control);
1676 if (retval != ERROR_OK)
1677 return retval;
1678
1679 return ERROR_OK;
1680 }
1681
/*
 * Remove a previously set breakpoint.
 *
 * BKPT_HARD: clears and writes back the BRP control/value registers.
 * A hybrid (context + IVA) breakpoint is recognised by having both a
 * non-zero address and a non-zero asid, and releases both linked BRPs.
 * BKPT_SOFT: restores the saved original instruction and keeps the
 * caches coherent with the restored code.
 */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			/* hybrid breakpoint: release both the primary and the
			 * linked BRP */
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: release the single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
					breakpoint->length);
		}

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
						breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
						breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1797
1798 static int cortex_a_add_breakpoint(struct target *target,
1799 struct breakpoint *breakpoint)
1800 {
1801 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1802
1803 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1804 LOG_INFO("no hardware breakpoint available");
1805 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1806 }
1807
1808 if (breakpoint->type == BKPT_HARD)
1809 cortex_a->brp_num_available--;
1810
1811 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1812 }
1813
1814 static int cortex_a_add_context_breakpoint(struct target *target,
1815 struct breakpoint *breakpoint)
1816 {
1817 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1818
1819 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1820 LOG_INFO("no hardware breakpoint available");
1821 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1822 }
1823
1824 if (breakpoint->type == BKPT_HARD)
1825 cortex_a->brp_num_available--;
1826
1827 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1828 }
1829
1830 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1831 struct breakpoint *breakpoint)
1832 {
1833 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1834
1835 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1836 LOG_INFO("no hardware breakpoint available");
1837 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1838 }
1839
1840 if (breakpoint->type == BKPT_HARD)
1841 cortex_a->brp_num_available--;
1842
1843 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1844 }
1845
1846
1847 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1848 {
1849 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1850
1851 #if 0
1852 /* It is perfectly possible to remove breakpoints while the target is running */
1853 if (target->state != TARGET_HALTED) {
1854 LOG_WARNING("target not halted");
1855 return ERROR_TARGET_NOT_HALTED;
1856 }
1857 #endif
1858
1859 if (breakpoint->set) {
1860 cortex_a_unset_breakpoint(target, breakpoint);
1861 if (breakpoint->type == BKPT_HARD)
1862 cortex_a->brp_num_available++;
1863 }
1864
1865
1866 return ERROR_OK;
1867 }
1868
1869 /*
1870 * Cortex-A Reset functions
1871 */
1872
1873 static int cortex_a_assert_reset(struct target *target)
1874 {
1875 struct armv7a_common *armv7a = target_to_armv7a(target);
1876
1877 LOG_DEBUG(" ");
1878
1879 /* FIXME when halt is requested, make it work somehow... */
1880
1881 /* Issue some kind of warm reset. */
1882 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1883 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1884 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1885 /* REVISIT handle "pulls" cases, if there's
1886 * hardware that needs them to work.
1887 */
1888 jtag_add_reset(0, 1);
1889 } else {
1890 LOG_ERROR("%s: how to reset?", target_name(target));
1891 return ERROR_FAIL;
1892 }
1893
1894 /* registers are now invalid */
1895 register_cache_invalidate(armv7a->arm.core_cache);
1896
1897 target->state = TARGET_RESET;
1898
1899 return ERROR_OK;
1900 }
1901
1902 static int cortex_a_deassert_reset(struct target *target)
1903 {
1904 int retval;
1905
1906 LOG_DEBUG(" ");
1907
1908 /* be certain SRST is off */
1909 jtag_add_reset(0, 0);
1910
1911 retval = cortex_a_poll(target);
1912 if (retval != ERROR_OK)
1913 return retval;
1914
1915 if (target->reset_halt) {
1916 if (target->state != TARGET_HALTED) {
1917 LOG_WARNING("%s: ran after reset and before halt ...",
1918 target_name(target));
1919 retval = target_halt(target);
1920 if (retval != ERROR_OK)
1921 return retval;
1922 }
1923 }
1924
1925 return ERROR_OK;
1926 }
1927
1928 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1929 {
1930 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1931 * New desired mode must be in mode. Current value of DSCR must be in
1932 * *dscr, which is updated with new value.
1933 *
1934 * This function elides actually sending the mode-change over the debug
1935 * interface if the mode is already set as desired.
1936 */
1937 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1938 if (new_dscr != *dscr) {
1939 struct armv7a_common *armv7a = target_to_armv7a(target);
1940 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1941 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1942 if (retval == ERROR_OK)
1943 *dscr = new_dscr;
1944 return retval;
1945 } else {
1946 return ERROR_OK;
1947 }
1948 }
1949
1950 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1951 uint32_t value, uint32_t *dscr)
1952 {
1953 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1954 struct armv7a_common *armv7a = target_to_armv7a(target);
1955 long long then = timeval_ms();
1956 int retval;
1957
1958 while ((*dscr & mask) != value) {
1959 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1960 armv7a->debug_base + CPUDBG_DSCR, dscr);
1961 if (retval != ERROR_OK)
1962 return retval;
1963 if (timeval_ms() > then + 1000) {
1964 LOG_ERROR("timeout waiting for DSCR bit change");
1965 return ERROR_FAIL;
1966 }
1967 }
1968 return ERROR_OK;
1969 }
1970
/*
 * Read a coprocessor register into *data by executing the given MRC
 * opcode on the core and pulling the result back through DTRTX.
 * The caller's cached DSCR value is passed in/out via *dscr.
 * The sequence is order-critical; see ARMv7 ARM section C8.4.3.
 */
static int cortex_a_read_copro(struct target *target, uint32_t opcode,
	uint32_t *data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Move from coprocessor to R0. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
	 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
	 * must also check TXfull_l). Most of the time this will be free
	 * because TXfull_l will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2004
2005 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
2006 uint32_t *dfsr, uint32_t *dscr)
2007 {
2008 int retval;
2009
2010 if (dfar) {
2011 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
2012 if (retval != ERROR_OK)
2013 return retval;
2014 }
2015
2016 if (dfsr) {
2017 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2018 if (retval != ERROR_OK)
2019 return retval;
2020 }
2021
2022 return ERROR_OK;
2023 }
2024
/* Write one coprocessor register on the halted core via the DCC.
 *
 * The value is pushed into DTRRX, pulled into the core's R0, and then moved
 * to the destination register with the caller-supplied MCR opcode. R0 is
 * clobbered, so the caller must have marked R0 dirty beforehand.
 *
 * opcode: MCR instruction moving R0 into the desired register
 * data:   32-bit value to write
 * dscr:   cached DSCR value; updated as instructions execute
 * Returns ERROR_OK on success, an OpenOCD error code otherwise.
 */
static int cortex_a_write_copro(struct target *target, uint32_t opcode,
		uint32_t data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Write the value into DTRRX. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, data);
	if (retval != ERROR_OK)
		return retval;

	/* Move from DTRRX to R0. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to coprocessor. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2057
2058 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2059 uint32_t dfsr, uint32_t *dscr)
2060 {
2061 int retval;
2062
2063 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2064 if (retval != ERROR_OK)
2065 return retval;
2066
2067 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2068 if (retval != ERROR_OK)
2069 return retval;
2070
2071 return ERROR_OK;
2072 }
2073
2074 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2075 {
2076 uint32_t status, upper4;
2077
2078 if (dfsr & (1 << 9)) {
2079 /* LPAE format. */
2080 status = dfsr & 0x3f;
2081 upper4 = status >> 2;
2082 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2083 return ERROR_TARGET_TRANSLATION_FAULT;
2084 else if (status == 33)
2085 return ERROR_TARGET_UNALIGNED_ACCESS;
2086 else
2087 return ERROR_TARGET_DATA_ABORT;
2088 } else {
2089 /* Normal format. */
2090 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2091 if (status == 1)
2092 return ERROR_TARGET_UNALIGNED_ACCESS;
2093 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2094 status == 9 || status == 11 || status == 13 || status == 15)
2095 return ERROR_TARGET_TRANSLATION_FAULT;
2096 else
2097 return ERROR_TARGET_DATA_ABORT;
2098 }
2099 }
2100
static int cortex_a_write_apb_ab_memory_slow(struct target *target,
		uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size size from *buffer. Old value of DSCR must
	 * be in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_write_apb_ab_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 * Returns ERROR_OK on success or on a target data fault (callers inspect
	 * the sticky abort bits in *dscr); other error codes indicate debug
	 * infrastructure failures.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Write the value to store into DTRRX, byte-swapped to target
		 * endianness for multi-byte sizes. */
		uint32_t data, opcode;
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* Transfer the value from DTRRX to R1. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Write the value transferred to R1 into memory. The loop never
		 * reloads R0, so these store encodings presumably post-increment
		 * the address in R0 ("_IP" suffix) -- confirm against the opcode
		 * definitions. */
		if (size == 1)
			opcode = ARMV4_5_STRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_STRH_IP(1, 0);
		else
			opcode = ARMV4_5_STRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check RXfull_l). Most of the time this will be free
		 * because RXfull_l will be cleared immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2175
static int cortex_a_write_apb_ab_memory_fast(struct target *target,
		uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size 4 from *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is fast but only works for
	 * word-sized objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 * In fast (DCC stall) mode the latched STC is re-issued automatically on
	 * every DTRRX write, so a single buffered burst transfers the whole
	 * block. The caller is responsible for waiting for completion and
	 * checking for aborts afterwards.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	/* Switch to fast mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Latch STC instruction (store from DTRRX via R0, post-indexed by 4). */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
	if (retval != ERROR_OK)
		return retval;

	/* Transfer all the data and issue all the instructions. */
	return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
			4, count, armv7a->debug_base + CPUDBG_DTRRX);
}
2204
/* Write target memory through the APB-AP using the core's DCC.
 *
 * Top-level write entry point: validates state, saves DFAR/DFSR, loads the
 * start address into R0, dispatches to the fast (word-aligned) or slow
 * helper, then performs fault detection, DFAR/DFSR restore, and DCC cleanup.
 * Requires the target to be halted. R0 (and, in the slow path, R1) are
 * clobbered and marked dirty so they are restored on resume.
 *
 * address: target start address
 * size:    access size in bytes (1, 2 or 4)
 * count:   number of accesses
 * buffer:  host data to write
 * Returns ERROR_OK, a fault-derived ERROR_TARGET_* code, or a debug
 * infrastructure error.
 */
static int cortex_a_write_apb_ab_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* Write memory through APB-AP. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0 (DTRRX -> R0 via MRC p14). */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_write_apb_ab_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_write_apb_ab_memory_slow(target, size, count, buffer, &dscr);
	}

out:
	/* Cleanup runs even on failure; remember the first error but keep
	 * going so the core is left in a sane state. */
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. However, don't do this
	 * if there is fault, because then the instruction might not have completed
	 * successfully. */
	if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
		/* NOTE(review): this early return skips the DFAR/DFSR restore and
		 * DCC drain below on a wait timeout -- consider folding the error
		 * into final_retval and continuing cleanup instead. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2343
static int cortex_a_read_apb_ab_memory_slow(struct target *target,
	uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size size into *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_read_apb_ab_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 * Returns ERROR_OK on success or on a target data fault (callers inspect
	 * the sticky abort bits in *dscr); other error codes indicate debug
	 * infrastructure failures.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Issue a load of the appropriate size to R1. The loop never
		 * reloads R0, so these load encodings presumably post-increment
		 * the address in R0 ("_IP" suffix) -- confirm against the opcode
		 * definitions. */
		uint32_t opcode, data;
		if (size == 1)
			opcode = ARMV4_5_LDRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_LDRH_IP(1, 0);
		else
			opcode = ARMV4_5_LDRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Issue a write of R1 to DTRTX. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check TXfull_l). Most of the time this will be free
		 * because TXfull_l will be set immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
				DSCR_DTRTX_FULL_LATCHED, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer, converting
		 * to host layout for multi-byte sizes. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &data);
		if (retval != ERROR_OK)
			return retval;
		if (size == 1)
			*buffer = (uint8_t) data;
		else if (size == 2)
			target_buffer_set_u16(target, buffer, (uint16_t) data);
		else
			target_buffer_set_u32(target, buffer, data);

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2419
static int cortex_a_read_apb_ab_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
	 * *dscr; updated to new value. This is fast but only works for word-sized
	 * objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 * The sequence is pipelined: the first LDC is issued in non-blocking
	 * mode, then fast mode re-issues the latched LDC on every DTRTX read, so
	 * each read returns word i while fetching word i+1. The final word is
	 * collected separately after leaving fast mode.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t u32;
	int retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Issue the LDC instruction via a write to ITR. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
	if (retval != ERROR_OK)
		return retval;

	count--;

	if (count > 0) {
		/* Switch to fast mode if not already in that mode. */
		retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Latch LDC instruction. */
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. Due to fast
		 * mode rules, this blocks until the instruction finishes executing and
		 * then reissues the read instruction to read the next word from
		 * memory. The last read of DTRTX in this call reads the second-to-last
		 * word from memory and issues the read instruction for the last word.
		 */
		retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
				4, count, armv7a->debug_base + CPUDBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += count * 4;
	}

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, dscr, false);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Check for faults and return early. */
	if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
		return ERROR_OK; /* A data fault is not considered a system failure. */

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check TXfull_l). Most of the time this will be free because TXfull_l
	 * will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX into the buffer. This is the last
	 * word. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &u32);
	if (retval != ERROR_OK)
		return retval;
	target_buffer_set_u32(target, buffer, u32);

	return ERROR_OK;
}
2506
/* Read target memory through the APB-AP using the core's DCC.
 *
 * Top-level read entry point: validates state, saves DFAR/DFSR, loads the
 * start address into R0, dispatches to the fast (word-aligned) or slow
 * helper, then performs fault detection, DFAR/DFSR restore, and DCC cleanup.
 * Requires the target to be halted. R0 (and, in the slow path, R1) are
 * clobbered and marked dirty so they are restored on resume.
 *
 * address: target start address
 * size:    access size in bytes (1, 2 or 4)
 * count:   number of accesses
 * buffer:  host buffer receiving the data
 * Returns ERROR_OK, a fault-derived ERROR_TARGET_* code, or a debug
 * infrastructure error.
 */
static int cortex_a_read_apb_ab_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through APB-AP. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0 (DTRRX -> R0 via MRC p14). */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_apb_ab_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_read_apb_ab_memory_slow(target, size, count, buffer, &dscr);
	}

out:
	/* Cleanup runs even on failure; remember the first error but keep
	 * going so the core is left in a sane state. */
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2633
2634
/*
 * Cortex-A memory access
 *
 * This is the same as for Cortex-M3, but we must also use the correct
 * AP number for every access.
 */
2641
2642 static int cortex_a_read_phys_memory(struct target *target,
2643 uint32_t address, uint32_t size,
2644 uint32_t count, uint8_t *buffer)
2645 {
2646 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2647
2648 LOG_DEBUG("Reading memory at real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32,
2649 address, size, count);
2650
2651 if (count && buffer) {
2652 /* read memory through APB-AP */
2653 cortex_a_prep_memaccess(target, 1);
2654 retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
2655 cortex_a_post_memaccess(target, 1);
2656 }
2657 return retval;
2658 }
2659
/* Read virtual memory through the APB-AP with the MMU context left as the
 * debugger found it (cortex_a_prep_memaccess(.., 0)). Unaligned accesses are
 * handled by the APB-AP slow path. */
static int cortex_a_read_memory(struct target *target, uint32_t address,
	uint32_t size, uint32_t count, uint8_t *buffer)
{
	int retval;

	/* cortex_a handles unaligned memory access */
	/* size and count are unsigned: PRIu32, not PRId32. */
	LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRIu32 "; count %" PRIu32, address,
		size, count);

	cortex_a_prep_memaccess(target, 0);
	retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
	cortex_a_post_memaccess(target, 0);

	return retval;
}
2675
2676 static int cortex_a_read_memory_ahb(struct target *target, uint32_t address,
2677 uint32_t size, uint32_t count, uint8_t *buffer)
2678 {
2679 int mmu_enabled = 0;
2680 uint32_t virt, phys;
2681 int retval;
2682 struct armv7a_common *armv7a = target_to_armv7a(target);
2683 struct adiv5_dap *swjdp = armv7a->arm.dap;
2684 uint8_t apsel = swjdp->apsel;
2685
2686 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2687 return target_read_memory(target, address, size, count, buffer);
2688
2689 /* cortex_a handles unaligned memory access */
2690 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2691 size, count);
2692
2693 /* determine if MMU was enabled on target stop */
2694 if (!armv7a->is_armv7r) {
2695 retval = cortex_a_mmu(target, &mmu_enabled);
2696 if (retval != ERROR_OK)
2697 return retval;
2698 }
2699
2700 if (mmu_enabled) {
2701 virt = address;
2702 retval = cortex_a_virt2phys(target, virt, &phys);
2703 if (retval != ERROR_OK)
2704 return retval;
2705
2706 LOG_DEBUG("Reading at virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2707 virt, phys);
2708 address = phys;
2709 }
2710
2711 if (!count || !buffer)
2712 return ERROR_COMMAND_SYNTAX_ERROR;
2713
2714 retval = mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2715
2716 return retval;
2717 }
2718
2719 static int cortex_a_write_phys_memory(struct target *target,
2720 uint32_t address, uint32_t size,
2721 uint32_t count, const uint8_t *buffer)
2722 {
2723 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2724
2725 LOG_DEBUG("Writing memory to real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2726 size, count);
2727
2728 if (count && buffer) {
2729 /* write memory through APB-AP */
2730 cortex_a_prep_memaccess(target, 1);
2731 retval = cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
2732 cortex_a_post_memaccess(target, 1);
2733 }
2734
2735 return retval;
2736 }
2737
/* Write virtual memory through the APB-AP with the MMU context left as the
 * debugger found it. Flushes the affected cache range first because APB-AP
 * writes bypass the caches. */
static int cortex_a_write_memory(struct target *target, uint32_t address,
	uint32_t size, uint32_t count, const uint8_t *buffer)
{
	int retval;

	/* cortex_a handles unaligned memory access */
	/* size and count are unsigned: PRIu32, not PRId32. */
	LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRIu32 "; count %" PRIu32, address,
		size, count);

	/* memory writes bypass the caches, must flush before writing */
	armv7a_cache_auto_flush_on_write(target, address, size * count);

	cortex_a_prep_memaccess(target, 0);
	retval = cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
	cortex_a_post_memaccess(target, 0);
	return retval;
}
2755
2756 static int cortex_a_write_memory_ahb(struct target *target, uint32_t address,
2757 uint32_t size, uint32_t count, const uint8_t *buffer)
2758 {
2759 int mmu_enabled = 0;
2760 uint32_t virt, phys;
2761 int retval;
2762 struct armv7a_common *armv7a = target_to_armv7a(target);
2763 struct adiv5_dap *swjdp = armv7a->arm.dap;
2764 uint8_t apsel = swjdp->apsel;
2765
2766 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2767 return target_write_memory(target, address, size, count, buffer);
2768
2769 /* cortex_a handles unaligned memory access */
2770 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2771 size, count);
2772
2773 /* determine if MMU was enabled on target stop */
2774 if (!armv7a->is_armv7r) {
2775 retval = cortex_a_mmu(target, &mmu_enabled);
2776 if (retval != ERROR_OK)
2777 return retval;
2778 }
2779
2780 if (mmu_enabled) {
2781 virt = address;
2782 retval = cortex_a_virt2phys(target, virt, &phys);
2783 if (retval != ERROR_OK)
2784 return retval;
2785
2786 LOG_DEBUG("Writing to virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2787 virt,
2788 phys);
2789 address = phys;
2790 }
2791
2792 if (!count || !buffer)
2793 return ERROR_COMMAND_SYNTAX_ERROR;
2794
2795 retval = mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2796
2797 return retval;
2798 }
2799
2800 static int cortex_a_read_buffer(struct target *target, uint32_t address,
2801 uint32_t count, uint8_t *buffer)
2802 {
2803 uint32_t size;
2804
2805 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2806 * will have something to do with the size we leave to it. */
2807 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2808 if (address & size) {
2809 int retval = cortex_a_read_memory_ahb(target, address, size, 1, buffer);
2810 if (retval != ERROR_OK)
2811 return retval;
2812 address += size;
2813 count -= size;
2814 buffer += size;
2815 }
2816 }
2817
2818 /* Read the data with as large access size as possible. */
2819 for (; size > 0; size /= 2) {
2820 uint32_t aligned = count - count % size;
2821 if (aligned > 0) {
2822 int retval = cortex_a_read_memory_ahb(target, address, size, aligned / size, buffer);
2823 if (retval != ERROR_OK)
2824 return retval;
2825 address += aligned;
2826 count -= aligned;
2827 buffer += aligned;
2828 }
2829 }
2830
2831 return ERROR_OK;
2832 }
2833
2834 static int cortex_a_write_buffer(struct target *target, uint32_t address,
2835 uint32_t count, const uint8_t *buffer)
2836 {
2837 uint32_t size;
2838
2839 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2840 * will have something to do with the size we leave to it. */
2841 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2842 if (address & size) {
2843 int retval = cortex_a_write_memory_ahb(target, address, size, 1, buffer);
2844 if (retval != ERROR_OK)
2845 return retval;
2846 address += size;
2847 count -= size;
2848 buffer += size;
2849 }
2850 }
2851
2852 /* Write the data with as large access size as possible. */
2853 for (; size > 0; size /= 2) {
2854 uint32_t aligned = count - count % size;
2855 if (aligned > 0) {
2856 int retval = cortex_a_write_memory_ahb(target, address, size, aligned / size, buffer);
2857 if (retval != ERROR_OK)
2858 return retval;
2859 address += aligned;
2860 count -= aligned;
2861 buffer += aligned;
2862 }
2863 }
2864
2865 return ERROR_OK;
2866 }
2867
2868 static int cortex_a_handle_target_request(void *priv)
2869 {
2870 struct target *target = priv;
2871 struct armv7a_common *armv7a = target_to_armv7a(target);
2872 int retval;
2873
2874 if (!target_was_examined(target))
2875 return ERROR_OK;
2876 if (!target->dbg_msg_enabled)
2877 return ERROR_OK;
2878
2879 if (target->state == TARGET_RUNNING) {
2880 uint32_t request;
2881 uint32_t dscr;
2882 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2883 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2884
2885 /* check if we have data */
2886 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2887 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2888 armv7a->debug_base + CPUDBG_DTRTX, &request);
2889 if (retval == ERROR_OK) {
2890 target_request(target, request);
2891 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2892 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2893 }
2894 }
2895 }
2896
2897 return ERROR_OK;
2898 }
2899
2900 /*
2901 * Cortex-A target information and configuration
2902 */
2903
2904 static int cortex_a_examine_first(struct target *target)
2905 {
2906 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2907 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2908 struct adiv5_dap *swjdp = armv7a->arm.dap;
2909 int i;
2910 int retval = ERROR_OK;
2911 uint32_t didr, ctypr, ttypr, cpuid, dbg_osreg;
2912
2913 retval = dap_dp_init(swjdp);
2914 if (retval != ERROR_OK) {
2915 LOG_ERROR("Could not initialize the debug port");
2916 return retval;
2917 }
2918
2919 /* Search for the APB-AB - it is needed for access to debug registers */
2920 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2921 if (retval != ERROR_OK) {
2922 LOG_ERROR("Could not find APB-AP for debug access");
2923 return retval;
2924 }
2925
2926 retval = mem_ap_init(armv7a->debug_ap);
2927 if (retval != ERROR_OK) {
2928 LOG_ERROR("Could not initialize the APB-AP");
2929 return retval;
2930 }
2931
2932 armv7a->debug_ap->memaccess_tck = 80;
2933
2934 /* Search for the AHB-AB.
2935 * REVISIT: We should search for AXI-AP as well and make sure the AP's MEMTYPE says it
2936 * can access system memory. */
2937 armv7a->memory_ap_available = false;
2938 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
2939 if (retval == ERROR_OK) {
2940 retval = mem_ap_init(armv7a->memory_ap);
2941 if (retval == ERROR_OK)
2942 armv7a->memory_ap_available = true;
2943 else
2944 LOG_WARNING("Could not initialize AHB-AP for memory access - using APB-AP");
2945 } else {
2946 /* AHB-AP not found - use APB-AP */
2947 LOG_DEBUG("Could not find AHB-AP - using APB-AP for memory access");
2948 }
2949
2950 if (!target->dbgbase_set) {
2951 uint32_t dbgbase;
2952 /* Get ROM Table base */
2953 uint32_t apid;
2954 int32_t coreidx = target->coreid;
2955 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2956 target->cmd_name);
2957 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
2958 if (retval != ERROR_OK)
2959 return retval;
2960 /* Lookup 0x15 -- Processor DAP */
2961 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
2962 &armv7a->debug_base, &coreidx);
2963 if (retval != ERROR_OK) {
2964 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2965 target->cmd_name);
2966 return retval;
2967 }
2968 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2969 target->coreid, armv7a->debug_base);
2970 } else
2971 armv7a->debug_base = target->dbgbase;
2972
2973 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2974 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2975 if (retval != ERROR_OK)
2976 return retval;
2977
2978 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2979 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2980 if (retval != ERROR_OK) {
2981 LOG_DEBUG("Examine %s failed", "CPUID");
2982 return retval;
2983 }
2984
2985 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2986 armv7a->debug_base + CPUDBG_CTYPR, &ctypr);
2987 if (retval != ERROR_OK) {
2988 LOG_DEBUG("Examine %s failed", "CTYPR");
2989 return retval;
2990 }
2991
2992 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2993 armv7a->debug_base + CPUDBG_TTYPR, &ttypr);
2994 if (retval != ERROR_OK) {
2995 LOG_DEBUG("Examine %s failed", "TTYPR");
2996 return retval;
2997 }
2998
2999 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3000 armv7a->debug_base + CPUDBG_DIDR, &didr);
3001 if (retval != ERROR_OK) {
3002 LOG_DEBUG("Examine %s failed", "DIDR");
3003 return retval;
3004 }
3005
3006 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
3007 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
3008 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
3009 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
3010
3011 cortex_a->cpuid = cpuid;
3012 cortex_a->ctypr = ctypr;
3013 cortex_a->ttypr = ttypr;
3014 cortex_a->didr = didr;
3015
3016 /* Unlocking the debug registers */
3017 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
3018 CORTEX_A15_PARTNUM) {
3019
3020 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
3021 armv7a->debug_base + CPUDBG_OSLAR,
3022 0);
3023
3024 if (retval != ERROR_OK)
3025 return retval;
3026
3027 }
3028 /* Unlocking the debug registers */
3029 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
3030 CORTEX_A7_PARTNUM) {
3031
3032 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
3033 armv7a->debug_base + CPUDBG_OSLAR,
3034 0);
3035
3036 if (retval != ERROR_OK)
3037 return retval;
3038
3039 }
3040 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3041 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
3042
3043 if (retval != ERROR_OK)
3044 return retval;
3045
3046 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
3047
3048 armv7a->arm.core_type = ARM_MODE_MON;
3049
3050 /* Avoid recreating the registers cache */
3051 if (!target_was_examined(target)) {
3052 retval = cortex_a_dpm_setup(cortex_a, didr);
3053 if (retval != ERROR_OK)
3054 return retval;
3055 }
3056
3057 /* Setup Breakpoint Register Pairs */
3058 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3059 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3060 cortex_a->brp_num_available = cortex_a->brp_num;
3061 free(cortex_a->brp_list);
3062 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3063 /* cortex_a->brb_enabled = ????; */
3064 for (i = 0; i < cortex_a->brp_num; i++) {
3065 cortex_a->brp_list[i].used = 0;
3066 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3067 cortex_a->brp_list[i].type = BRP_NORMAL;
3068 else
3069 cortex_a->brp_list[i].type = BRP_CONTEXT;
3070 cortex_a->brp_list[i].value = 0;
3071 cortex_a->brp_list[i].control = 0;
3072 cortex_a->brp_list[i].BRPn = i;
3073 }
3074
3075 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3076
3077 target_set_examined(target);
3078 return ERROR_OK;
3079 }
3080
3081 static int cortex_a_examine(struct target *target)
3082 {
3083 int retval = ERROR_OK;
3084
3085 /* Reestablish communication after target reset */
3086 retval = cortex_a_examine_first(target);
3087
3088 /* Configure core debug access */
3089 if (retval == ERROR_OK)
3090 retval = cortex_a_init_debug_access(target);
3091
3092 return retval;
3093 }
3094
3095 /*
3096 * Cortex-A target creation and initialization
3097 */
3098
/*
 * Target-framework init hook; intentionally a no-op because the real
 * setup happens in cortex_a_examine_first().
 */
static int cortex_a_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
3105
3106 static int cortex_a_init_arch_info(struct target *target,
3107 struct cortex_a_common *cortex_a, struct jtag_tap *tap)
3108 {
3109 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3110
3111 /* Setup struct cortex_a_common */
3112 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3113
3114 /* tap has no dap initialized */
3115 if (!tap->dap) {
3116 tap->dap = dap_init();
3117
3118 /* Leave (only) generic DAP stuff for debugport_init() */
3119 tap->dap->tap = tap;
3120 }
3121
3122 armv7a->arm.dap = tap->dap;
3123
3124 cortex_a->fast_reg_read = 0;
3125
3126 /* register arch-specific functions */
3127 armv7a->examine_debug_reason = NULL;
3128
3129 armv7a->post_debug_entry = cortex_a_post_debug_entry;
3130
3131 armv7a->pre_restore_context = NULL;
3132
3133 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3134
3135
3136 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
3137
3138 /* REVISIT v7a setup should be in a v7a-specific routine */
3139 armv7a_init_arch_info(target, armv7a);
3140 target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
3141
3142 return ERROR_OK;
3143 }
3144
3145 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3146 {
3147 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3148
3149 cortex_a->armv7a_common.is_armv7r = false;
3150
3151 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3152 }
3153
3154 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3155 {
3156 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3157
3158 cortex_a->armv7a_common.is_armv7r = true;
3159
3160 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3161 }
3162
3163 static void cortex_a_deinit_target(struct target *target)
3164 {
3165 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3166 struct arm_dpm *dpm = &cortex_a->armv7a_common.dpm;
3167
3168 free(cortex_a->brp_list);
3169 free(dpm->dbp);
3170 free(dpm->dwp);
3171 free(cortex_a);
3172 }
3173
3174 static int cortex_a_mmu(struct target *target, int *enabled)
3175 {
3176 struct armv7a_common *armv7a = target_to_armv7a(target);
3177
3178 if (target->state != TARGET_HALTED) {
3179 LOG_ERROR("%s: target not halted", __func__);
3180 return ERROR_TARGET_INVALID;
3181 }
3182
3183 if (armv7a->is_armv7r)
3184 *enabled = 0;
3185 else
3186 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3187
3188 return ERROR_OK;
3189 }
3190
3191 static int cortex_a_virt2phys(struct target *target,
3192 uint32_t virt, uint32_t *phys)
3193 {
3194 int retval = ERROR_FAIL;
3195 struct armv7a_common *armv7a = target_to_armv7a(target);
3196 struct adiv5_dap *swjdp = armv7a->arm.dap;
3197 uint8_t apsel = swjdp->apsel;
3198 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num)) {
3199 uint32_t ret;
3200 retval = armv7a_mmu_translate_va(target,
3201 virt, &ret);
3202 if (retval != ERROR_OK)
3203 goto done;
3204 *phys = ret;
3205 } else {/* use this method if armv7a->memory_ap not selected
3206 * mmu must be enable in order to get a correct translation */
3207 retval = cortex_a_mmu_modify(target, 1);
3208 if (retval != ERROR_OK)
3209 goto done;
3210 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
3211 }
3212 done:
3213 return retval;
3214 }
3215
3216 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3217 {
3218 struct target *target = get_current_target(CMD_CTX);
3219 struct armv7a_common *armv7a = target_to_armv7a(target);
3220
3221 return armv7a_handle_cache_info_command(CMD_CTX,
3222 &armv7a->armv7a_mmu.armv7a_cache);
3223 }
3224
3225
3226 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3227 {
3228 struct target *target = get_current_target(CMD_CTX);
3229 if (!target_was_examined(target)) {
3230 LOG_ERROR("target not examined yet");
3231 return ERROR_FAIL;
3232 }
3233
3234 return cortex_a_init_debug_access(target);
3235 }
3236 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
3237 {
3238 struct target *target = get_current_target(CMD_CTX);
3239 /* check target is an smp target */
3240 struct target_list *head;
3241 struct target *curr;
3242 head = target->head;
3243 target->smp = 0;
3244 if (head != (struct target_list *)NULL) {
3245 while (head != (struct target_list *)NULL) {
3246 curr = head->target;
3247 curr->smp = 0;
3248 head = head->next;
3249 }
3250 /* fixes the target display to the debugger */
3251 target->gdb_service->target = target;
3252 }
3253 return ERROR_OK;
3254 }
3255
3256 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
3257 {
3258 struct target *target = get_current_target(CMD_CTX);
3259 struct target_list *head;
3260 struct target *curr;
3261 head = target->head;
3262 if (head != (struct target_list *)NULL) {
3263 target->smp = 1;
3264 while (head != (struct target_list *)NULL) {
3265 curr = head->target;
3266 curr->smp = 1;
3267 head = head->next;
3268 }
3269 }
3270 return ERROR_OK;
3271 }
3272
3273 COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
3274 {
3275 struct target *target = get_current_target(CMD_CTX);
3276 int retval = ERROR_OK;
3277 struct target_list *head;
3278 head = target->head;
3279 if (head != (struct target_list *)NULL) {
3280 if (CMD_ARGC == 1) {
3281 int coreid = 0;
3282 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
3283 if (ERROR_OK != retval)
3284 return retval;
3285 target->gdb_service->core[1] = coreid;
3286
3287 }
3288 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
3289 , target->gdb_service->core[1]);
3290 }
3291 return ERROR_OK;
3292 }
3293
3294 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3295 {
3296 struct target *target = get_current_target(CMD_CTX);
3297 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3298
3299 static const Jim_Nvp nvp_maskisr_modes[] = {
3300 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3301 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3302 { .name = NULL, .value = -1 },
3303 };
3304 const Jim_Nvp *n;
3305
3306 if (target->state != TARGET_HALTED) {
3307 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3308 return ERROR_OK;
3309 }
3310
3311 if (CMD_ARGC > 0) {
3312 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3313 if (n->name == NULL)
3314 return ERROR_COMMAND_SYNTAX_ERROR;
3315 cortex_a->isrmasking_mode = n->value;
3316
3317 }
3318
3319 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3320 command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
3321
3322 return ERROR_OK;
3323 }
3324
/* Cortex-A specific commands, exposed under the "cortex_a" command group. */
static const struct command_registration cortex_a_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{ .name = "smp_off",
		.handler = cortex_a_handle_smp_off_command,
		.mode = COMMAND_EXEC,
		.help = "Stop smp handling",
		.usage = "",},
	{
		.name = "smp_on",
		.handler = cortex_a_handle_smp_on_command,
		.mode = COMMAND_EXEC,
		.help = "Restart smp handling",
		.usage = "",
	},
	{
		.name = "smp_gdb",
		.handler = cortex_a_handle_smp_gdb_command,
		.mode = COMMAND_EXEC,
		.help = "display/fix current core played to gdb",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_a interrupts",
		.usage = "['on'|'off']",
	},


	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for Cortex-A targets: chains the
 * generic ARM and ARMv7-A commands plus the "cortex_a" group above. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3386
/* Operations table for the "cortex_a" target type (ARMv7-A, with MMU and
 * physical-memory access support), exported to the target framework. */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.read_buffer = cortex_a_read_buffer,
	.write_buffer = cortex_a_write_buffer,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* watchpoints are not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,

	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
3433
/* Cortex-R4 specific commands, exposed under the "cortex_r4" group. */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_r4 interrupts",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for Cortex-R4 targets: chains the
 * generic ARM and ARMv7-A commands plus the "cortex_r4" group above. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3475
/* Operations table for the "cortex_r4" target type (ARMv7-R). Unlike
 * cortexa_target it omits the buffered and physical-memory accessors
 * and the MMU/virt2phys hooks — ARMv7-R has no MMU. */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* watchpoints are not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account. Then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will allow linking the new method. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)