cortex_a/r/m: fix handling of un-examined cores
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex-R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 * *
39 * Cortex-A8(tm) TRM, ARM DDI 0344H *
40 * Cortex-A9(tm) TRM, ARM DDI 0407F *
41 * Cortex-A4(tm) TRM, ARM DDI 0363E *
42 * Cortex-A15(tm)TRM, ARM DDI 0438C *
43 * *
44 ***************************************************************************/
45
46 #ifdef HAVE_CONFIG_H
47 #include "config.h"
48 #endif
49
50 #include "breakpoints.h"
51 #include "cortex_a.h"
52 #include "register.h"
53 #include "target_request.h"
54 #include "target_type.h"
55 #include "arm_opcodes.h"
56 #include "arm_semihosting.h"
57 #include <helper/time_support.h>
58
59 static int cortex_a_poll(struct target *target);
60 static int cortex_a_debug_entry(struct target *target);
61 static int cortex_a_restore_context(struct target *target, bool bpwp);
62 static int cortex_a_set_breakpoint(struct target *target,
63 struct breakpoint *breakpoint, uint8_t matchmode);
64 static int cortex_a_set_context_breakpoint(struct target *target,
65 struct breakpoint *breakpoint, uint8_t matchmode);
66 static int cortex_a_set_hybrid_breakpoint(struct target *target,
67 struct breakpoint *breakpoint);
68 static int cortex_a_unset_breakpoint(struct target *target,
69 struct breakpoint *breakpoint);
70 static int cortex_a_dap_read_coreregister_u32(struct target *target,
71 uint32_t *value, int regnum);
72 static int cortex_a_dap_write_coreregister_u32(struct target *target,
73 uint32_t value, int regnum);
74 static int cortex_a_mmu(struct target *target, int *enabled);
75 static int cortex_a_mmu_modify(struct target *target, int enable);
76 static int cortex_a_virt2phys(struct target *target,
77 uint32_t virt, uint32_t *phys);
78 static int cortex_a_read_cpu_memory(struct target *target,
79 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
80
81
82 /* restore cp15_control_reg at resume */
83 static int cortex_a_restore_cp15_control_reg(struct target *target)
84 {
85 int retval = ERROR_OK;
86 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
87 struct armv7a_common *armv7a = target_to_armv7a(target);
88
89 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
90 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
91 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
92 retval = armv7a->arm.mcr(target, 15,
93 0, 0, /* op1, op2 */
94 1, 0, /* CRn, CRm */
95 cortex_a->cp15_control_reg);
96 }
97 return retval;
98 }
99
100 /*
101 * Set up ARM core for memory access.
102 * If !phys_access, switch to SVC mode and make sure MMU is on
103 * If phys_access, switch off mmu
104 */
105 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
106 {
107 struct armv7a_common *armv7a = target_to_armv7a(target);
108 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
109 int mmu_enabled = 0;
110
111 if (phys_access == 0) {
112 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
113 cortex_a_mmu(target, &mmu_enabled);
114 if (mmu_enabled)
115 cortex_a_mmu_modify(target, 1);
116 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
117 /* overwrite DACR to all-manager */
118 armv7a->arm.mcr(target, 15,
119 0, 0, 3, 0,
120 0xFFFFFFFF);
121 }
122 } else {
123 cortex_a_mmu(target, &mmu_enabled);
124 if (mmu_enabled)
125 cortex_a_mmu_modify(target, 0);
126 }
127 return ERROR_OK;
128 }
129
130 /*
131 * Restore ARM core after memory access.
132 * If !phys_access, switch to previous mode
133 * If phys_access, restore MMU setting
134 */
135 static int cortex_a_post_memaccess(struct target *target, int phys_access)
136 {
137 struct armv7a_common *armv7a = target_to_armv7a(target);
138 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
139
140 if (phys_access == 0) {
141 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
142 /* restore */
143 armv7a->arm.mcr(target, 15,
144 0, 0, 3, 0,
145 cortex_a->cp15_dacr_reg);
146 }
147 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
148 } else {
149 int mmu_enabled = 0;
150 cortex_a_mmu(target, &mmu_enabled);
151 if (mmu_enabled)
152 cortex_a_mmu_modify(target, 1);
153 }
154 return ERROR_OK;
155 }
156
157
158 /* modify cp15_control_reg in order to enable or disable mmu for :
159 * - virt2phys address conversion
160 * - read or write memory in phys or virt address */
161 static int cortex_a_mmu_modify(struct target *target, int enable)
162 {
163 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
164 struct armv7a_common *armv7a = target_to_armv7a(target);
165 int retval = ERROR_OK;
166 int need_write = 0;
167
168 if (enable) {
169 /* if mmu enabled at target stop and mmu not enable */
170 if (!(cortex_a->cp15_control_reg & 0x1U)) {
171 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
172 return ERROR_FAIL;
173 }
174 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
175 cortex_a->cp15_control_reg_curr |= 0x1U;
176 need_write = 1;
177 }
178 } else {
179 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
180 cortex_a->cp15_control_reg_curr &= ~0x1U;
181 need_write = 1;
182 }
183 }
184
185 if (need_write) {
186 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
187 enable ? "enable mmu" : "disable mmu",
188 cortex_a->cp15_control_reg_curr);
189
190 retval = armv7a->arm.mcr(target, 15,
191 0, 0, /* op1, op2 */
192 1, 0, /* CRn, CRm */
193 cortex_a->cp15_control_reg_curr);
194 }
195 return retval;
196 }
197
198 /*
199 * Cortex-A Basic debug access, very low level assumes state is saved
200 */
201 static int cortex_a8_init_debug_access(struct target *target)
202 {
203 struct armv7a_common *armv7a = target_to_armv7a(target);
204 int retval;
205
206 LOG_DEBUG(" ");
207
208 /* Unlocking the debug registers for modification
209 * The debugport might be uninitialised so try twice */
210 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
211 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
212 if (retval != ERROR_OK) {
213 /* try again */
214 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
215 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
216 if (retval == ERROR_OK)
217 LOG_USER(
218 "Locking debug access failed on first, but succeeded on second try.");
219 }
220
221 return retval;
222 }
223
/*
 * Cortex-A debug access initialisation, very low level, assumes state is
 * saved. Unlocks the debug registers (OS lock on A7/A15, software lock
 * otherwise), clears the sticky power-down bit, and disables cache and
 * TLB interference while in debug state; finishes with a poll so the
 * target state is refreshed (this is typically called from init/reset).
 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;
	uint32_t dbg_osreg;
	uint32_t cortex_part_num;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	LOG_DEBUG(" ");
	/* extract the MIDR part number to select the unlock mechanism */
	cortex_part_num = (cortex_a->cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >>
		CORTEX_A_MIDR_PARTNUM_SHIFT;

	switch (cortex_part_num) {
	case CORTEX_A7_PARTNUM:
	case CORTEX_A15_PARTNUM:
		/* A7/A15: debug registers are gated behind the OS lock */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_OSLSR,
				&dbg_osreg);
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("DBGOSLSR 0x%" PRIx32, dbg_osreg);

		if (dbg_osreg & CPUDBG_OSLAR_LK_MASK)
			/* Unlocking the DEBUG OS registers for modification */
			retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
					armv7a->debug_base + CPUDBG_OSLAR,
					0);
		break;

	case CORTEX_A5_PARTNUM:
	case CORTEX_A8_PARTNUM:
	case CORTEX_A9_PARTNUM:
	default:
		/* other/unknown parts use the LOCKACCESS key write */
		retval = cortex_a8_init_debug_access(target);
	}

	if (retval != ERROR_OK)
		return retval;
	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
	LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);

	if (retval != ERROR_OK)
		return retval;

	/* Disable cacheline fills and force cache write-through in debug state */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable TLB lookup and refill/eviction in debug state */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSMCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a_poll(target);
}
294
295 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
296 {
297 /* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
298 * Writes final value of DSCR into *dscr. Pass force to force always
299 * reading DSCR at least once. */
300 struct armv7a_common *armv7a = target_to_armv7a(target);
301 int64_t then = timeval_ms();
302 while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
303 force = false;
304 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
305 armv7a->debug_base + CPUDBG_DSCR, dscr);
306 if (retval != ERROR_OK) {
307 LOG_ERROR("Could not read DSCR register");
308 return retval;
309 }
310 if (timeval_ms() > then + 1000) {
311 LOG_ERROR("Timeout waiting for InstrCompl=1");
312 return ERROR_FAIL;
313 }
314 }
315 return ERROR_OK;
316 }
317
318 /* To reduce needless round-trips, pass in a pointer to the current
319 * DSCR value. Initialize it to zero if you just need to know the
320 * value on return from this function; or DSCR_INSTR_COMP if you
321 * happen to know that no instruction is pending.
322 */
323 static int cortex_a_exec_opcode(struct target *target,
324 uint32_t opcode, uint32_t *dscr_p)
325 {
326 uint32_t dscr;
327 int retval;
328 struct armv7a_common *armv7a = target_to_armv7a(target);
329
330 dscr = dscr_p ? *dscr_p : 0;
331
332 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
333
334 /* Wait for InstrCompl bit to be set */
335 retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
336 if (retval != ERROR_OK)
337 return retval;
338
339 retval = mem_ap_write_u32(armv7a->debug_ap,
340 armv7a->debug_base + CPUDBG_ITR, opcode);
341 if (retval != ERROR_OK)
342 return retval;
343
344 int64_t then = timeval_ms();
345 do {
346 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
347 armv7a->debug_base + CPUDBG_DSCR, &dscr);
348 if (retval != ERROR_OK) {
349 LOG_ERROR("Could not read DSCR register");
350 return retval;
351 }
352 if (timeval_ms() > then + 1000) {
353 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
354 return ERROR_FAIL;
355 }
356 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
357
358 if (dscr_p)
359 *dscr_p = dscr;
360
361 return retval;
362 }
363
/**************************************************************************
Read core registers with very few exec_opcode calls, fast but needs a
work_area. This can cause problems with the MMU active.
**************************************************************************/
static int cortex_a_read_regs_through_mem(struct target *target, uint32_t address,
	uint32_t *regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* save r0 into regfile[0], then point r0 at the scratch area */
	retval = cortex_a_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	/* STMIA r0, {r1-r15}: dump r1..r15 to the work area in one burst
	 * (0xFFFE = register list with bit 0 / r0 excluded) */
	retval = cortex_a_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;

	/* pull the 15 stored words back through the memory AP into regfile[1..15] */
	retval = mem_ap_read_buf(armv7a->memory_ap,
			(uint8_t *)(&regfile[1]), 4, 15, address);

	return retval;
}
389
/* Read a core register (r0-r14: regnum 0-14, pc: 15, cpsr/spsr: 16/17)
 * through the DCC: execute a transfer instruction on the core that moves
 * the register into DTRTX, wait for DSCR.DTRTXfull, then read DTRTX over
 * the debug AP. Register numbers above 17 are silently ignored (returns
 * ERROR_OK). NOTE: reading pc/cpsr/spsr clobbers r0. */
static int cortex_a_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	if (reg > 17)
		return retval;

	if (reg < 15) {
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else if (reg == 15) {
		/* "MOV r0, r15"; then move r0 to DCCTX */
		retval = cortex_a_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* "MRS r0, CPSR" or "MRS r0, SPSR" (reg & 1 selects SPSR)
		 * then move r0 to DCCTX
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX (1 s timeout) */
	int64_t then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
451
/* Write a core register (r0-r14: regnum 0-14, pc: 15, cpsr/spsr: 16/17)
 * through the DCC: place the value in DTRRX over the debug AP, then
 * execute a transfer instruction on the core that loads it into the
 * destination register. NOTE: writing pc/cpsr/spsr clobbers r0. */
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* NOTE(review): out-of-range register numbers return ERROR_OK, and
	 * only after the DCCRX drain above — confirm this late bail-out is
	 * intentional */
	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15) {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	} else if (Rd == 15) {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16) {
			retval = cortex_a_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
529
530 /* Write to memory mapped registers directly with no cache or mmu handling */
531 static int cortex_a_dap_write_memap_register_u32(struct target *target,
532 uint32_t address,
533 uint32_t value)
534 {
535 int retval;
536 struct armv7a_common *armv7a = target_to_armv7a(target);
537
538 retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
539
540 return retval;
541 }
542
543 /*
544 * Cortex-A implementation of Debug Programmer's Model
545 *
546 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
547 * so there's no need to poll for it before executing an instruction.
548 *
549 * NOTE that in several of these cases the "stall" mode might be useful.
550 * It'd let us queue a few operations together... prepare/finish might
551 * be the places to enable/disable that mode.
552 */
553
/* Recover the enclosing Cortex-A container from its embedded DPM member. */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
558
559 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
560 {
561 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
562 return mem_ap_write_u32(a->armv7a_common.debug_ap,
563 a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
564 }
565
/* Read one word from the target's DCC transmit register (DTRTX), first
 * waiting (1 s timeout) for DSCR.DTRTXfull. When non-NULL, *dscr_p
 * supplies the last known DSCR value and receives the final one. */
static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	int64_t then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
600
/* DPM transaction prologue: wait (1 s timeout) until no instruction is
 * pending, establishing the invariant that DSCR_INSTR_COMP is set after
 * every DPM operation, and drain a stale DCCRX word if one is present. */
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by reading it into r0 */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
637
/* DPM transaction epilogue; nothing to tear down on Cortex-A. */
static int cortex_a_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
643
644 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
645 uint32_t opcode, uint32_t data)
646 {
647 struct cortex_a_common *a = dpm_to_a(dpm);
648 int retval;
649 uint32_t dscr = DSCR_INSTR_COMP;
650
651 retval = cortex_a_write_dcc(a, data);
652 if (retval != ERROR_OK)
653 return retval;
654
655 return cortex_a_exec_opcode(
656 a->armv7a_common.arm.target,
657 opcode,
658 &dscr);
659 }
660
/* Write DATA through core register r0: load it into the DCC, move it
 * into r0 with an MRC, then run OPCODE (which takes its data from r0). */
static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);

	return retval;
}
688
689 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
690 {
691 struct target *target = dpm->arm->target;
692 uint32_t dscr = DSCR_INSTR_COMP;
693
694 /* "Prefetch flush" after modifying execution status in CPSR */
695 return cortex_a_exec_opcode(target,
696 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
697 &dscr);
698 }
699
700 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
701 uint32_t opcode, uint32_t *data)
702 {
703 struct cortex_a_common *a = dpm_to_a(dpm);
704 int retval;
705 uint32_t dscr = DSCR_INSTR_COMP;
706
707 /* the opcode, writing data to DCC */
708 retval = cortex_a_exec_opcode(
709 a->armv7a_common.arm.target,
710 opcode,
711 &dscr);
712 if (retval != ERROR_OK)
713 return retval;
714
715 return cortex_a_read_dcc(a, data, &dscr);
716 }
717
718
719 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
720 uint32_t opcode, uint32_t *data)
721 {
722 struct cortex_a_common *a = dpm_to_a(dpm);
723 uint32_t dscr = DSCR_INSTR_COMP;
724 int retval;
725
726 /* the opcode, writing data to R0 */
727 retval = cortex_a_exec_opcode(
728 a->armv7a_common.arm.target,
729 opcode,
730 &dscr);
731 if (retval != ERROR_OK)
732 return retval;
733
734 /* write R0 to DCC */
735 retval = cortex_a_exec_opcode(
736 a->armv7a_common.arm.target,
737 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
738 &dscr);
739 if (retval != ERROR_OK)
740 return retval;
741
742 return cortex_a_read_dcc(a, data, &dscr);
743 }
744
/* Arm hardware breakpoint (index_t 0..15) or watchpoint (index_t 16..31)
 * unit: write the value register (address) first, then the control
 * register to enable it. Uses the GCC case-range extension. */
static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t vr = a->armv7a_common.debug_base;
	uint32_t cr = a->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
	case 0 ... 15:	/* breakpoints */
		vr += CPUDBG_BVR_BASE;
		cr += CPUDBG_BCR_BASE;
		break;
	case 16 ... 31:	/* watchpoints */
		vr += CPUDBG_WVR_BASE;
		cr += CPUDBG_WCR_BASE;
		index_t -= 16;
		break;
	default:
		return ERROR_FAIL;
	}
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
780
781 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
782 {
783 struct cortex_a_common *a = dpm_to_a(dpm);
784 uint32_t cr;
785
786 switch (index_t) {
787 case 0 ... 15:
788 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
789 break;
790 case 16 ... 31:
791 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
792 index_t -= 16;
793 break;
794 default:
795 return ERROR_FAIL;
796 }
797 cr += 4 * index_t;
798
799 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
800
801 /* clear control register */
802 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
803 }
804
805 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
806 {
807 struct arm_dpm *dpm = &a->armv7a_common.dpm;
808 int retval;
809
810 dpm->arm = &a->armv7a_common.arm;
811 dpm->didr = didr;
812
813 dpm->prepare = cortex_a_dpm_prepare;
814 dpm->finish = cortex_a_dpm_finish;
815
816 dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
817 dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
818 dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
819
820 dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
821 dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
822
823 dpm->bpwp_enable = cortex_a_bpwp_enable;
824 dpm->bpwp_disable = cortex_a_bpwp_disable;
825
826 retval = arm_dpm_setup(dpm);
827 if (retval == ERROR_OK)
828 retval = arm_dpm_initialize(dpm);
829
830 return retval;
831 }
832 static struct target *get_cortex_a(struct target *target, int32_t coreid)
833 {
834 struct target_list *head;
835 struct target *curr;
836
837 head = target->head;
838 while (head != (struct target_list *)NULL) {
839 curr = head->target;
840 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
841 return curr;
842 head = head->next;
843 }
844 return target;
845 }
846 static int cortex_a_halt(struct target *target);
847
848 static int cortex_a_halt_smp(struct target *target)
849 {
850 int retval = 0;
851 struct target_list *head;
852 struct target *curr;
853 head = target->head;
854 while (head != (struct target_list *)NULL) {
855 curr = head->target;
856 if ((curr != target) && (curr->state != TARGET_HALTED)
857 && target_was_examined(curr))
858 retval += cortex_a_halt(curr);
859 head = head->next;
860 }
861 return retval;
862 }
863
864 static int update_halt_gdb(struct target *target)
865 {
866 int retval = 0;
867 if (target->gdb_service && target->gdb_service->core[0] == -1) {
868 target->gdb_service->target = target;
869 target->gdb_service->core[0] = target->coreid;
870 retval += cortex_a_halt_smp(target);
871 }
872 return retval;
873 }
874
875 /*
876 * Cortex-A Run control
877 */
878
/* Poll the core's DSCR and translate it into OpenOCD target state,
 * firing debug-entry handling, SMP halt propagation, semihosting and
 * halt event callbacks on a running->halted transition. */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					/* drag the rest of the SMP group to a halt */
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				/* semihosting may resume the target itself;
				 * in that case skip the HALTED event */
				if (arm_semihosting(target, &retval) != 0)
					return retval;

				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
952
/* Halt the core: request a halt through DRCR, enable halting debug mode
 * in DSCR, then wait (1 s timeout) for DSCR to report the core halted. */
static int cortex_a_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enter halting debug mode
	 */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	/* poll DSCR until the halted bit appears */
	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_HALTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
999
/* Prepare the core for resume: pick the resume PC (current PC when
 * 'current' is set, otherwise *address), apply the ARM/Thumb PC fixups,
 * restore cp15 control and the register context, and mark the target
 * running. Does not actually restart the core — see
 * cortex_a_internal_restart(). On return *address holds the resume PC. */
static int cortex_a_internal_restore(struct target *target, int current,
	uint32_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM instructions are word aligned */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
1094
/* Actually restart a core previously prepared by
 * cortex_a_internal_restore(): disable the ITR, issue a restart (with
 * sticky exception flags cleared) via DRCR, and wait up to 1 s for the
 * core to report "restarted" in DSCR. */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* A pending instruction in the ITR would be lost across restart;
	 * warn if the core has not finished executing it. */
	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* Disable the ITR before leaving debug state (see ARMv7 ARM C5.9) */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	/* Request restart and clear sticky exception flags */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Poll DSCR until the core acknowledges the restart (1 s timeout) */
	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1150
1151 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
1152 {
1153 int retval = 0;
1154 struct target_list *head;
1155 struct target *curr;
1156 uint32_t address;
1157 head = target->head;
1158 while (head != (struct target_list *)NULL) {
1159 curr = head->target;
1160 if ((curr != target) && (curr->state != TARGET_RUNNING)
1161 && target_was_examined(curr)) {
1162 /* resume current address , not in step mode */
1163 retval += cortex_a_internal_restore(curr, 1, &address,
1164 handle_breakpoints, 0);
1165 retval += cortex_a_internal_restart(curr);
1166 }
1167 head = head->next;
1168
1169 }
1170 return retval;
1171 }
1172
1173 static int cortex_a_resume(struct target *target, int current,
1174 uint32_t address, int handle_breakpoints, int debug_execution)
1175 {
1176 int retval = 0;
1177 /* dummy resume for smp toggle in order to reduce gdb impact */
1178 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1179 /* simulate a start and halt of target */
1180 target->gdb_service->target = NULL;
1181 target->gdb_service->core[0] = target->gdb_service->core[1];
1182 /* fake resume at next poll we play the target core[1], see poll*/
1183 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1184 return 0;
1185 }
1186 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1187 if (target->smp) {
1188 target->gdb_service->core[0] = -1;
1189 retval = cortex_a_restore_smp(target, handle_breakpoints);
1190 if (retval != ERROR_OK)
1191 return retval;
1192 }
1193 cortex_a_internal_restart(target);
1194
1195 if (!debug_execution) {
1196 target->state = TARGET_RUNNING;
1197 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1198 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1199 } else {
1200 target->state = TARGET_DEBUG_RUNNING;
1201 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1202 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1203 }
1204
1205 return ERROR_OK;
1206 }
1207
/* Called after the core has entered debug state: re-enable the ITR,
 * determine and report the debug reason, snapshot the core registers
 * (and CPSR/SPSR) into the register cache, then run the optional
 * post_debug_entry hook.  Returns an OpenOCD error code. */
static int cortex_a_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, spsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);


	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
		/* Slow path: read registers one at a time through the DPM */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	else {
		/* Fast path: the core dumps r0-r15 into the working area,
		 * which is then read back in a single memory transfer. */
		retval = cortex_a_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
			return retval;

		/* read Current PSR */
		retval = cortex_a_dap_read_coreregister_u32(target, &cpsr, 16);
		/* store current cpsr */
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(arm, cpsr);

		/* update cache */
		for (i = 0; i <= ARM_PC; i++) {
			reg = arm_reg_current(arm, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address */
		if (cpsr & (1 << 5)) {
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		} else {
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = arm->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		/* PC only needs writing back if it was valid before */
		reg->dirty = reg->valid;
	}

	/* read Saved PSR */
	retval = cortex_a_dap_read_coreregister_u32(target, &spsr, 17);
	/* store current spsr */
	if (retval != ERROR_OK)
		return retval;

	reg = arm->spsr;
	buf_set_u32(reg->value, 0, 32, spsr);
	reg->valid = 1;
	reg->dirty = 0;

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1339
1340 static int cortex_a_post_debug_entry(struct target *target)
1341 {
1342 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1343 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1344 int retval;
1345
1346 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1347 retval = armv7a->arm.mrc(target, 15,
1348 0, 0, /* op1, op2 */
1349 1, 0, /* CRn, CRm */
1350 &cortex_a->cp15_control_reg);
1351 if (retval != ERROR_OK)
1352 return retval;
1353 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1354 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1355
1356 if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1357 armv7a_identify_cache(target);
1358
1359 if (armv7a->is_armv7r) {
1360 armv7a->armv7a_mmu.mmu_enabled = 0;
1361 } else {
1362 armv7a->armv7a_mmu.mmu_enabled =
1363 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1364 }
1365 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1366 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1367 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1368 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1369 cortex_a->curr_mode = armv7a->arm.core_mode;
1370
1371 /* switch to SVC mode to read DACR */
1372 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1373 armv7a->arm.mrc(target, 15,
1374 0, 0, 3, 0,
1375 &cortex_a->cp15_dacr_reg);
1376
1377 LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1378 cortex_a->cp15_dacr_reg);
1379
1380 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1381 return ERROR_OK;
1382 }
1383
1384 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1385 {
1386 struct armv7a_common *armv7a = target_to_armv7a(target);
1387 uint32_t dscr;
1388
1389 /* Read DSCR */
1390 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1391 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1392 if (ERROR_OK != retval)
1393 return retval;
1394
1395 /* clear bitfield */
1396 dscr &= ~bit_mask;
1397 /* put new value */
1398 dscr |= value & bit_mask;
1399
1400 /* write new DSCR */
1401 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1402 armv7a->debug_base + CPUDBG_DSCR, dscr);
1403 return retval;
1404 }
1405
1406 static int cortex_a_step(struct target *target, int current, uint32_t address,
1407 int handle_breakpoints)
1408 {
1409 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1410 struct armv7a_common *armv7a = target_to_armv7a(target);
1411 struct arm *arm = &armv7a->arm;
1412 struct breakpoint *breakpoint = NULL;
1413 struct breakpoint stepbreakpoint;
1414 struct reg *r;
1415 int retval;
1416
1417 if (target->state != TARGET_HALTED) {
1418 LOG_WARNING("target not halted");
1419 return ERROR_TARGET_NOT_HALTED;
1420 }
1421
1422 /* current = 1: continue on current pc, otherwise continue at <address> */
1423 r = arm->pc;
1424 if (!current)
1425 buf_set_u32(r->value, 0, 32, address);
1426 else
1427 address = buf_get_u32(r->value, 0, 32);
1428
1429 /* The front-end may request us not to handle breakpoints.
1430 * But since Cortex-A uses breakpoint for single step,
1431 * we MUST handle breakpoints.
1432 */
1433 handle_breakpoints = 1;
1434 if (handle_breakpoints) {
1435 breakpoint = breakpoint_find(target, address);
1436 if (breakpoint)
1437 cortex_a_unset_breakpoint(target, breakpoint);
1438 }
1439
1440 /* Setup single step breakpoint */
1441 stepbreakpoint.address = address;
1442 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1443 ? 2 : 4;
1444 stepbreakpoint.type = BKPT_HARD;
1445 stepbreakpoint.set = 0;
1446
1447 /* Disable interrupts during single step if requested */
1448 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1449 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1450 if (ERROR_OK != retval)
1451 return retval;
1452 }
1453
1454 /* Break on IVA mismatch */
1455 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1456
1457 target->debug_reason = DBG_REASON_SINGLESTEP;
1458
1459 retval = cortex_a_resume(target, 1, address, 0, 0);
1460 if (retval != ERROR_OK)
1461 return retval;
1462
1463 int64_t then = timeval_ms();
1464 while (target->state != TARGET_HALTED) {
1465 retval = cortex_a_poll(target);
1466 if (retval != ERROR_OK)
1467 return retval;
1468 if (timeval_ms() > then + 1000) {
1469 LOG_ERROR("timeout waiting for target halt");
1470 return ERROR_FAIL;
1471 }
1472 }
1473
1474 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1475
1476 /* Re-enable interrupts if they were disabled */
1477 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1478 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1479 if (ERROR_OK != retval)
1480 return retval;
1481 }
1482
1483
1484 target->debug_reason = DBG_REASON_BREAKPOINT;
1485
1486 if (breakpoint)
1487 cortex_a_set_breakpoint(target, breakpoint, 0);
1488
1489 if (target->state != TARGET_HALTED)
1490 LOG_DEBUG("target stepped");
1491
1492 return ERROR_OK;
1493 }
1494
1495 static int cortex_a_restore_context(struct target *target, bool bpwp)
1496 {
1497 struct armv7a_common *armv7a = target_to_armv7a(target);
1498
1499 LOG_DEBUG(" ");
1500
1501 if (armv7a->pre_restore_context)
1502 armv7a->pre_restore_context(target);
1503
1504 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1505 }
1506
1507 /*
1508 * Cortex-A Breakpoint and watchpoint functions
1509 */
1510
1511 /* Setup hardware Breakpoint Register Pair */
1512 static int cortex_a_set_breakpoint(struct target *target,
1513 struct breakpoint *breakpoint, uint8_t matchmode)
1514 {
1515 int retval;
1516 int brp_i = 0;
1517 uint32_t control;
1518 uint8_t byte_addr_select = 0x0F;
1519 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1520 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1521 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1522
1523 if (breakpoint->set) {
1524 LOG_WARNING("breakpoint already set");
1525 return ERROR_OK;
1526 }
1527
1528 if (breakpoint->type == BKPT_HARD) {
1529 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1530 brp_i++;
1531 if (brp_i >= cortex_a->brp_num) {
1532 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1533 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1534 }
1535 breakpoint->set = brp_i + 1;
1536 if (breakpoint->length == 2)
1537 byte_addr_select = (3 << (breakpoint->address & 0x02));
1538 control = ((matchmode & 0x7) << 20)
1539 | (byte_addr_select << 5)
1540 | (3 << 1) | 1;
1541 brp_list[brp_i].used = 1;
1542 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1543 brp_list[brp_i].control = control;
1544 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1545 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1546 brp_list[brp_i].value);
1547 if (retval != ERROR_OK)
1548 return retval;
1549 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1550 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1551 brp_list[brp_i].control);
1552 if (retval != ERROR_OK)
1553 return retval;
1554 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1555 brp_list[brp_i].control,
1556 brp_list[brp_i].value);
1557 } else if (breakpoint->type == BKPT_SOFT) {
1558 uint8_t code[4];
1559 if (breakpoint->length == 2)
1560 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1561 else
1562 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1563 retval = target_read_memory(target,
1564 breakpoint->address & 0xFFFFFFFE,
1565 breakpoint->length, 1,
1566 breakpoint->orig_instr);
1567 if (retval != ERROR_OK)
1568 return retval;
1569
1570 /* make sure data cache is cleaned & invalidated down to PoC */
1571 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1572 armv7a_cache_flush_virt(target, breakpoint->address,
1573 breakpoint->length);
1574 }
1575
1576 retval = target_write_memory(target,
1577 breakpoint->address & 0xFFFFFFFE,
1578 breakpoint->length, 1, code);
1579 if (retval != ERROR_OK)
1580 return retval;
1581
1582 /* update i-cache at breakpoint location */
1583 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1584 breakpoint->length);
1585 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1586 breakpoint->length);
1587
1588 breakpoint->set = 0x11; /* Any nice value but 0 */
1589 }
1590
1591 return ERROR_OK;
1592 }
1593
1594 static int cortex_a_set_context_breakpoint(struct target *target,
1595 struct breakpoint *breakpoint, uint8_t matchmode)
1596 {
1597 int retval = ERROR_FAIL;
1598 int brp_i = 0;
1599 uint32_t control;
1600 uint8_t byte_addr_select = 0x0F;
1601 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1602 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1603 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1604
1605 if (breakpoint->set) {
1606 LOG_WARNING("breakpoint already set");
1607 return retval;
1608 }
1609 /*check available context BRPs*/
1610 while ((brp_list[brp_i].used ||
1611 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1612 brp_i++;
1613
1614 if (brp_i >= cortex_a->brp_num) {
1615 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1616 return ERROR_FAIL;
1617 }
1618
1619 breakpoint->set = brp_i + 1;
1620 control = ((matchmode & 0x7) << 20)
1621 | (byte_addr_select << 5)
1622 | (3 << 1) | 1;
1623 brp_list[brp_i].used = 1;
1624 brp_list[brp_i].value = (breakpoint->asid);
1625 brp_list[brp_i].control = control;
1626 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1627 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1628 brp_list[brp_i].value);
1629 if (retval != ERROR_OK)
1630 return retval;
1631 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1632 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1633 brp_list[brp_i].control);
1634 if (retval != ERROR_OK)
1635 return retval;
1636 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1637 brp_list[brp_i].control,
1638 brp_list[brp_i].value);
1639 return ERROR_OK;
1640
1641 }
1642
1643 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1644 {
1645 int retval = ERROR_FAIL;
1646 int brp_1 = 0; /* holds the contextID pair */
1647 int brp_2 = 0; /* holds the IVA pair */
1648 uint32_t control_CTX, control_IVA;
1649 uint8_t CTX_byte_addr_select = 0x0F;
1650 uint8_t IVA_byte_addr_select = 0x0F;
1651 uint8_t CTX_machmode = 0x03;
1652 uint8_t IVA_machmode = 0x01;
1653 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1654 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1655 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1656
1657 if (breakpoint->set) {
1658 LOG_WARNING("breakpoint already set");
1659 return retval;
1660 }
1661 /*check available context BRPs*/
1662 while ((brp_list[brp_1].used ||
1663 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1664 brp_1++;
1665
1666 printf("brp(CTX) found num: %d\n", brp_1);
1667 if (brp_1 >= cortex_a->brp_num) {
1668 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1669 return ERROR_FAIL;
1670 }
1671
1672 while ((brp_list[brp_2].used ||
1673 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1674 brp_2++;
1675
1676 printf("brp(IVA) found num: %d\n", brp_2);
1677 if (brp_2 >= cortex_a->brp_num) {
1678 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1679 return ERROR_FAIL;
1680 }
1681
1682 breakpoint->set = brp_1 + 1;
1683 breakpoint->linked_BRP = brp_2;
1684 control_CTX = ((CTX_machmode & 0x7) << 20)
1685 | (brp_2 << 16)
1686 | (0 << 14)
1687 | (CTX_byte_addr_select << 5)
1688 | (3 << 1) | 1;
1689 brp_list[brp_1].used = 1;
1690 brp_list[brp_1].value = (breakpoint->asid);
1691 brp_list[brp_1].control = control_CTX;
1692 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1693 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1694 brp_list[brp_1].value);
1695 if (retval != ERROR_OK)
1696 return retval;
1697 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1698 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1699 brp_list[brp_1].control);
1700 if (retval != ERROR_OK)
1701 return retval;
1702
1703 control_IVA = ((IVA_machmode & 0x7) << 20)
1704 | (brp_1 << 16)
1705 | (IVA_byte_addr_select << 5)
1706 | (3 << 1) | 1;
1707 brp_list[brp_2].used = 1;
1708 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1709 brp_list[brp_2].control = control_IVA;
1710 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1711 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1712 brp_list[brp_2].value);
1713 if (retval != ERROR_OK)
1714 return retval;
1715 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1716 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1717 brp_list[brp_2].control);
1718 if (retval != ERROR_OK)
1719 return retval;
1720
1721 return ERROR_OK;
1722 }
1723
/* Remove a previously-set breakpoint.  For BKPT_HARD this clears the
 * BVR/BCR pair (and, for a hybrid breakpoint with a nonzero address and
 * ASID, the linked pair as well); for BKPT_SOFT it writes the saved
 * original instruction back over the BKPT opcode and invalidates the
 * caches at that address. */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* nonzero address AND asid means this was a hybrid breakpoint
		 * occupying two linked BRPs */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: clear its single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
					breakpoint->length);
		}

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
						breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
						breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1839
1840 static int cortex_a_add_breakpoint(struct target *target,
1841 struct breakpoint *breakpoint)
1842 {
1843 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1844
1845 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1846 LOG_INFO("no hardware breakpoint available");
1847 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1848 }
1849
1850 if (breakpoint->type == BKPT_HARD)
1851 cortex_a->brp_num_available--;
1852
1853 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1854 }
1855
1856 static int cortex_a_add_context_breakpoint(struct target *target,
1857 struct breakpoint *breakpoint)
1858 {
1859 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1860
1861 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1862 LOG_INFO("no hardware breakpoint available");
1863 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1864 }
1865
1866 if (breakpoint->type == BKPT_HARD)
1867 cortex_a->brp_num_available--;
1868
1869 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1870 }
1871
1872 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1873 struct breakpoint *breakpoint)
1874 {
1875 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1876
1877 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1878 LOG_INFO("no hardware breakpoint available");
1879 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1880 }
1881
1882 if (breakpoint->type == BKPT_HARD)
1883 cortex_a->brp_num_available--;
1884
1885 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1886 }
1887
1888
1889 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1890 {
1891 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1892
1893 #if 0
1894 /* It is perfectly possible to remove breakpoints while the target is running */
1895 if (target->state != TARGET_HALTED) {
1896 LOG_WARNING("target not halted");
1897 return ERROR_TARGET_NOT_HALTED;
1898 }
1899 #endif
1900
1901 if (breakpoint->set) {
1902 cortex_a_unset_breakpoint(target, breakpoint);
1903 if (breakpoint->type == BKPT_HARD)
1904 cortex_a->brp_num_available++;
1905 }
1906
1907
1908 return ERROR_OK;
1909 }
1910
1911 /*
1912 * Cortex-A Reset functions
1913 */
1914
/* Assert reset on the target: run the board's reset-assert event
 * handler if one exists, otherwise pulse SRST when the adapter supports
 * it.  Safe to call before the target has been examined. */
static int cortex_a_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* This function can be called in "target not examined" state */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		/* SRST may gate the debug interface; only assert it for a
		 * reset-halt when the config says it does not. */
		if (target->reset_halt)
			if (jtag_get_reset_config() & RESET_SRST_NO_GATING)
				jtag_add_reset(0, 1);
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target))
		register_cache_invalidate(armv7a->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1948
1949 static int cortex_a_deassert_reset(struct target *target)
1950 {
1951 int retval;
1952
1953 LOG_DEBUG(" ");
1954
1955 /* be certain SRST is off */
1956 jtag_add_reset(0, 0);
1957
1958 if (target_was_examined(target)) {
1959 retval = cortex_a_poll(target);
1960 if (retval != ERROR_OK)
1961 return retval;
1962 }
1963
1964 if (target->reset_halt) {
1965 if (target->state != TARGET_HALTED) {
1966 LOG_WARNING("%s: ran after reset and before halt ...",
1967 target_name(target));
1968 if (target_was_examined(target)) {
1969 retval = target_halt(target);
1970 if (retval != ERROR_OK)
1971 return retval;
1972 } else
1973 target->state = TARGET_UNKNOWN;
1974 }
1975 }
1976
1977 return ERROR_OK;
1978 }
1979
1980 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1981 {
1982 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1983 * New desired mode must be in mode. Current value of DSCR must be in
1984 * *dscr, which is updated with new value.
1985 *
1986 * This function elides actually sending the mode-change over the debug
1987 * interface if the mode is already set as desired.
1988 */
1989 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1990 if (new_dscr != *dscr) {
1991 struct armv7a_common *armv7a = target_to_armv7a(target);
1992 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1993 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1994 if (retval == ERROR_OK)
1995 *dscr = new_dscr;
1996 return retval;
1997 } else {
1998 return ERROR_OK;
1999 }
2000 }
2001
2002 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
2003 uint32_t value, uint32_t *dscr)
2004 {
2005 /* Waits until the specified bit(s) of DSCR take on a specified value. */
2006 struct armv7a_common *armv7a = target_to_armv7a(target);
2007 int64_t then = timeval_ms();
2008 int retval;
2009
2010 while ((*dscr & mask) != value) {
2011 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2012 armv7a->debug_base + CPUDBG_DSCR, dscr);
2013 if (retval != ERROR_OK)
2014 return retval;
2015 if (timeval_ms() > then + 1000) {
2016 LOG_ERROR("timeout waiting for DSCR bit change");
2017 return ERROR_FAIL;
2018 }
2019 }
2020 return ERROR_OK;
2021 }
2022
/* Read a coprocessor register: execute the caller's MRC opcode (which
 * must target R0), move R0 into DTRTX, wait for DTRTX to latch, then
 * read the value over the debug AP into *data.  *dscr is the cached
 * DSCR value, kept up to date by the helpers. */
static int cortex_a_read_copro(struct target *target, uint32_t opcode,
	uint32_t *data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Move from coprocessor to R0. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
	 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
	 * must also check TXfull_l). Most of the time this will be free
	 * because TXfull_l will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2056
2057 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
2058 uint32_t *dfsr, uint32_t *dscr)
2059 {
2060 int retval;
2061
2062 if (dfar) {
2063 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
2064 if (retval != ERROR_OK)
2065 return retval;
2066 }
2067
2068 if (dfsr) {
2069 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2070 if (retval != ERROR_OK)
2071 return retval;
2072 }
2073
2074 return ERROR_OK;
2075 }
2076
/* Write a coprocessor register through the DCC.
 *
 * data is pushed into DTRRX, moved to R0 on the core, and then opcode
 * (e.g. an MCR) is executed to move R0 into the destination register.
 * *dscr holds the cached DSCR value and is updated.  Returns ERROR_OK
 * on success.
 *
 * NOTE(review): this clobbers core register R0 and assumes the DCC is in
 * non-blocking mode; callers appear responsible for marking R0 dirty and
 * setting the mode beforehand — confirm at call sites.
 */
static int cortex_a_write_copro(struct target *target, uint32_t opcode,
	uint32_t data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Write the value into DTRRX. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, data);
	if (retval != ERROR_OK)
		return retval;

	/* Move from DTRRX to R0. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to coprocessor. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2109
2110 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2111 uint32_t dfsr, uint32_t *dscr)
2112 {
2113 int retval;
2114
2115 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2116 if (retval != ERROR_OK)
2117 return retval;
2118
2119 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2120 if (retval != ERROR_OK)
2121 return retval;
2122
2123 return ERROR_OK;
2124 }
2125
2126 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2127 {
2128 uint32_t status, upper4;
2129
2130 if (dfsr & (1 << 9)) {
2131 /* LPAE format. */
2132 status = dfsr & 0x3f;
2133 upper4 = status >> 2;
2134 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2135 return ERROR_TARGET_TRANSLATION_FAULT;
2136 else if (status == 33)
2137 return ERROR_TARGET_UNALIGNED_ACCESS;
2138 else
2139 return ERROR_TARGET_DATA_ABORT;
2140 } else {
2141 /* Normal format. */
2142 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2143 if (status == 1)
2144 return ERROR_TARGET_UNALIGNED_ACCESS;
2145 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2146 status == 9 || status == 11 || status == 13 || status == 15)
2147 return ERROR_TARGET_TRANSLATION_FAULT;
2148 else
2149 return ERROR_TARGET_DATA_ABORT;
2150 }
2151 }
2152
static int cortex_a_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size size from *buffer. Old value of DSCR must
	 * be in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_write_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 * Returns ERROR_OK on success or when a sticky abort was latched (a
	 * data fault is left for the caller to detect in *dscr); any other
	 * value is a debug-interface failure.  Clobbers R1.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Write the value to store into DTRRX. */
		uint32_t data, opcode;
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* Transfer the value from DTRRX to R1. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Write the value transferred to R1 into memory.  The STR*
		 * opcodes post-increment the address in R0 by the access size,
		 * so R0 always points at the next object. */
		if (size == 1)
			opcode = ARMV4_5_STRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_STRH_IP(1, 0);
		else
			opcode = ARMV4_5_STRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check RXfull_l). Most of the time this will be free
		 * because RXfull_l will be cleared immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2227
static int cortex_a_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size 4 from *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is fast but only works for
	 * word-sized objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 * The caller is expected to detect any latched data fault in *dscr
	 * afterwards; this function only reports debug-interface failures.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	/* Switch to fast mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Latch STC instruction.  In fast mode the latched instruction is
	 * re-issued automatically on every DTRRX write below. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
	if (retval != ERROR_OK)
		return retval;

	/* Transfer all the data and issue all the instructions. */
	return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
			4, count, armv7a->debug_base + CPUDBG_DTRRX);
}
2256
/* Write count objects of size size from *buffer to target memory at address,
 * executing load/store instructions on the halted core via the DCC.
 *
 * Picks the fast (latched STC) path for aligned word transfers and the slow
 * per-object path otherwise, then performs fault detection and cleanup:
 * sticky aborts are converted to error codes via DFSR, and DFAR/DFSR are
 * restored since a fault clobbers them.  Returns ERROR_OK on success, a
 * target/data-abort error code on memory faults, or a debug-interface error.
 */
static int cortex_a_write_cpu_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* Write memory through the CPU. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
	}

out:
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. However, don't do this
	 * if there is fault, because then the instruction might not have completed
	 * successfully. */
	if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
		/* NOTE(review): this early return skips the abort-flag clearing
		 * and DFAR/DFSR restoration below — confirm this is intended. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2395
static int cortex_a_read_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size size into *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_read_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 * Returns ERROR_OK on success or when a sticky abort was latched (a
	 * data fault is left for the caller to detect in *dscr); any other
	 * value is a debug-interface failure.  Clobbers R1.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Issue a load of the appropriate size to R1.  The LDR* opcodes
		 * post-increment the address in R0 by the access size, so R0
		 * always points at the next object. */
		uint32_t opcode, data;
		if (size == 1)
			opcode = ARMV4_5_LDRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_LDRH_IP(1, 0);
		else
			opcode = ARMV4_5_LDRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Issue a write of R1 to DTRTX. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check TXfull_l). Most of the time this will be free
		 * because TXfull_l will be set immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
				DSCR_DTRTX_FULL_LATCHED, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &data);
		if (retval != ERROR_OK)
			return retval;
		if (size == 1)
			*buffer = (uint8_t) data;
		else if (size == 2)
			target_buffer_set_u16(target, buffer, (uint16_t) data);
		else
			target_buffer_set_u32(target, buffer, data);

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2471
static int cortex_a_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
	 * *dscr; updated to new value. This is fast but only works for word-sized
	 * objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 * The transfer is pipelined: the first LDC is issued explicitly, the
	 * middle words are streamed in fast mode, and the final word is drained
	 * after leaving fast mode.  The caller is expected to detect any
	 * latched data fault in *dscr afterwards.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t u32;
	int retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Issue the LDC instruction via a write to ITR. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* One word is already in flight from the LDC above. */
	count--;

	if (count > 0) {
		/* Switch to fast mode if not already in that mode. */
		retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Latch LDC instruction. */
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. Due to fast
		 * mode rules, this blocks until the instruction finishes executing and
		 * then reissues the read instruction to read the next word from
		 * memory. The last read of DTRTX in this call reads the second-to-last
		 * word from memory and issues the read instruction for the last word.
		 */
		retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
				4, count, armv7a->debug_base + CPUDBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += count * 4;
	}

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, dscr, false);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Check for faults and return early. */
	if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
		return ERROR_OK; /* A data fault is not considered a system failure. */

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check TXfull_l). Most of the time this will be free because TXfull_l
	 * will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX into the buffer. This is the last
	 * word. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &u32);
	if (retval != ERROR_OK)
		return retval;
	target_buffer_set_u32(target, buffer, u32);

	return ERROR_OK;
}
2558
/* Read count objects of size size from target memory at address into *buffer,
 * executing load instructions on the halted core via the DCC.
 *
 * Picks the fast (latched LDC) path for aligned word transfers and the slow
 * per-object path otherwise, then performs fault detection and cleanup:
 * sticky aborts are converted to error codes via DFSR, and DFAR/DFSR are
 * restored since a fault clobbers them.  Returns ERROR_OK on success, a
 * target/data-abort error code on memory faults, or a debug-interface error.
 */
static int cortex_a_read_cpu_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through the CPU. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
	}

out:
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2685
2686
2687 /*
2688 * Cortex-A Memory access
2689 *
 * This is the same as for Cortex-M3, but we must also use the correct
 * AP number for every access.
2692 */
2693
2694 static int cortex_a_read_phys_memory(struct target *target,
2695 uint32_t address, uint32_t size,
2696 uint32_t count, uint8_t *buffer)
2697 {
2698 struct armv7a_common *armv7a = target_to_armv7a(target);
2699 struct adiv5_dap *swjdp = armv7a->arm.dap;
2700 uint8_t apsel = swjdp->apsel;
2701 int retval;
2702
2703 if (!count || !buffer)
2704 return ERROR_COMMAND_SYNTAX_ERROR;
2705
2706 LOG_DEBUG("Reading memory at real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32,
2707 address, size, count);
2708
2709 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
2710 return mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2711
2712 /* read memory through the CPU */
2713 cortex_a_prep_memaccess(target, 1);
2714 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2715 cortex_a_post_memaccess(target, 1);
2716
2717 return retval;
2718 }
2719
2720 static int cortex_a_read_memory(struct target *target, uint32_t address,
2721 uint32_t size, uint32_t count, uint8_t *buffer)
2722 {
2723 int retval;
2724
2725 /* cortex_a handles unaligned memory access */
2726 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2727 size, count);
2728
2729 cortex_a_prep_memaccess(target, 0);
2730 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2731 cortex_a_post_memaccess(target, 0);
2732
2733 return retval;
2734 }
2735
2736 static int cortex_a_read_memory_ahb(struct target *target, uint32_t address,
2737 uint32_t size, uint32_t count, uint8_t *buffer)
2738 {
2739 int mmu_enabled = 0;
2740 uint32_t virt, phys;
2741 int retval;
2742 struct armv7a_common *armv7a = target_to_armv7a(target);
2743 struct adiv5_dap *swjdp = armv7a->arm.dap;
2744 uint8_t apsel = swjdp->apsel;
2745
2746 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2747 return target_read_memory(target, address, size, count, buffer);
2748
2749 /* cortex_a handles unaligned memory access */
2750 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2751 size, count);
2752
2753 /* determine if MMU was enabled on target stop */
2754 if (!armv7a->is_armv7r) {
2755 retval = cortex_a_mmu(target, &mmu_enabled);
2756 if (retval != ERROR_OK)
2757 return retval;
2758 }
2759
2760 if (mmu_enabled) {
2761 virt = address;
2762 retval = cortex_a_virt2phys(target, virt, &phys);
2763 if (retval != ERROR_OK)
2764 return retval;
2765
2766 LOG_DEBUG("Reading at virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2767 virt, phys);
2768 address = phys;
2769 }
2770
2771 if (!count || !buffer)
2772 return ERROR_COMMAND_SYNTAX_ERROR;
2773
2774 retval = mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2775
2776 return retval;
2777 }
2778
2779 static int cortex_a_write_phys_memory(struct target *target,
2780 uint32_t address, uint32_t size,
2781 uint32_t count, const uint8_t *buffer)
2782 {
2783 struct armv7a_common *armv7a = target_to_armv7a(target);
2784 struct adiv5_dap *swjdp = armv7a->arm.dap;
2785 uint8_t apsel = swjdp->apsel;
2786 int retval;
2787
2788 if (!count || !buffer)
2789 return ERROR_COMMAND_SYNTAX_ERROR;
2790
2791 LOG_DEBUG("Writing memory to real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2792 size, count);
2793
2794 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
2795 return mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2796
2797 /* write memory through the CPU */
2798 cortex_a_prep_memaccess(target, 1);
2799 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2800 cortex_a_post_memaccess(target, 1);
2801
2802 return retval;
2803 }
2804
2805 static int cortex_a_write_memory(struct target *target, uint32_t address,
2806 uint32_t size, uint32_t count, const uint8_t *buffer)
2807 {
2808 int retval;
2809
2810 /* cortex_a handles unaligned memory access */
2811 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2812 size, count);
2813
2814 /* memory writes bypass the caches, must flush before writing */
2815 armv7a_cache_auto_flush_on_write(target, address, size * count);
2816
2817 cortex_a_prep_memaccess(target, 0);
2818 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2819 cortex_a_post_memaccess(target, 0);
2820 return retval;
2821 }
2822
2823 static int cortex_a_write_memory_ahb(struct target *target, uint32_t address,
2824 uint32_t size, uint32_t count, const uint8_t *buffer)
2825 {
2826 int mmu_enabled = 0;
2827 uint32_t virt, phys;
2828 int retval;
2829 struct armv7a_common *armv7a = target_to_armv7a(target);
2830 struct adiv5_dap *swjdp = armv7a->arm.dap;
2831 uint8_t apsel = swjdp->apsel;
2832
2833 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2834 return target_write_memory(target, address, size, count, buffer);
2835
2836 /* cortex_a handles unaligned memory access */
2837 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2838 size, count);
2839
2840 /* determine if MMU was enabled on target stop */
2841 if (!armv7a->is_armv7r) {
2842 retval = cortex_a_mmu(target, &mmu_enabled);
2843 if (retval != ERROR_OK)
2844 return retval;
2845 }
2846
2847 if (mmu_enabled) {
2848 virt = address;
2849 retval = cortex_a_virt2phys(target, virt, &phys);
2850 if (retval != ERROR_OK)
2851 return retval;
2852
2853 LOG_DEBUG("Writing to virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2854 virt,
2855 phys);
2856 address = phys;
2857 }
2858
2859 if (!count || !buffer)
2860 return ERROR_COMMAND_SYNTAX_ERROR;
2861
2862 retval = mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2863
2864 return retval;
2865 }
2866
2867 static int cortex_a_read_buffer(struct target *target, uint32_t address,
2868 uint32_t count, uint8_t *buffer)
2869 {
2870 uint32_t size;
2871
2872 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2873 * will have something to do with the size we leave to it. */
2874 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2875 if (address & size) {
2876 int retval = cortex_a_read_memory_ahb(target, address, size, 1, buffer);
2877 if (retval != ERROR_OK)
2878 return retval;
2879 address += size;
2880 count -= size;
2881 buffer += size;
2882 }
2883 }
2884
2885 /* Read the data with as large access size as possible. */
2886 for (; size > 0; size /= 2) {
2887 uint32_t aligned = count - count % size;
2888 if (aligned > 0) {
2889 int retval = cortex_a_read_memory_ahb(target, address, size, aligned / size, buffer);
2890 if (retval != ERROR_OK)
2891 return retval;
2892 address += aligned;
2893 count -= aligned;
2894 buffer += aligned;
2895 }
2896 }
2897
2898 return ERROR_OK;
2899 }
2900
2901 static int cortex_a_write_buffer(struct target *target, uint32_t address,
2902 uint32_t count, const uint8_t *buffer)
2903 {
2904 uint32_t size;
2905
2906 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2907 * will have something to do with the size we leave to it. */
2908 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2909 if (address & size) {
2910 int retval = cortex_a_write_memory_ahb(target, address, size, 1, buffer);
2911 if (retval != ERROR_OK)
2912 return retval;
2913 address += size;
2914 count -= size;
2915 buffer += size;
2916 }
2917 }
2918
2919 /* Write the data with as large access size as possible. */
2920 for (; size > 0; size /= 2) {
2921 uint32_t aligned = count - count % size;
2922 if (aligned > 0) {
2923 int retval = cortex_a_write_memory_ahb(target, address, size, aligned / size, buffer);
2924 if (retval != ERROR_OK)
2925 return retval;
2926 address += aligned;
2927 count -= aligned;
2928 buffer += aligned;
2929 }
2930 }
2931
2932 return ERROR_OK;
2933 }
2934
/* Periodic callback that drains debug messages sent by a running target
 * through the DCC (DTRTX) and forwards each word to target_request().
 *
 * Skips un-examined targets entirely: their debug AP base may not be valid
 * yet, so touching DSCR would be unsafe.  Only polls while the target is
 * running with debug messaging enabled; gives up with ERROR_FAIL if DTRTX
 * stays full for more than one second.
 */
static int cortex_a_handle_target_request(void *priv)
{
	struct target *target = priv;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	if (!target_was_examined(target))
		return ERROR_OK;
	if (!target->dbg_msg_enabled)
		return ERROR_OK;

	if (target->state == TARGET_RUNNING) {
		uint32_t request;
		uint32_t dscr;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);

		/* check if we have data */
		int64_t then = timeval_ms();
		while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
			retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
					armv7a->debug_base + CPUDBG_DTRTX, &request);
			if (retval == ERROR_OK) {
				target_request(target, request);
				/* Re-read DSCR to see whether more data is pending. */
				retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
						armv7a->debug_base + CPUDBG_DSCR, &dscr);
			}
			if (timeval_ms() > then + 1000) {
				LOG_ERROR("Timeout waiting for dtr tx full");
				return ERROR_FAIL;
			}
		}
	}

	return ERROR_OK;
}
2971
2972 /*
2973 * Cortex-A target information and configuration
2974 */
2975
2976 static int cortex_a_examine_first(struct target *target)
2977 {
2978 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2979 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2980 struct adiv5_dap *swjdp = armv7a->arm.dap;
2981
2982 int i;
2983 int retval = ERROR_OK;
2984 uint32_t didr, ctypr, ttypr, cpuid, dbg_osreg;
2985
2986 retval = dap_dp_init(swjdp);
2987 if (retval != ERROR_OK) {
2988 LOG_ERROR("Could not initialize the debug port");
2989 return retval;
2990 }
2991
2992 /* Search for the APB-AP - it is needed for access to debug registers */
2993 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2994 if (retval != ERROR_OK) {
2995 LOG_ERROR("Could not find APB-AP for debug access");
2996 return retval;
2997 }
2998
2999 retval = mem_ap_init(armv7a->debug_ap);
3000 if (retval != ERROR_OK) {
3001 LOG_ERROR("Could not initialize the APB-AP");
3002 return retval;
3003 }
3004
3005 armv7a->debug_ap->memaccess_tck = 80;
3006
3007 /* Search for the AHB-AB.
3008 * REVISIT: We should search for AXI-AP as well and make sure the AP's MEMTYPE says it
3009 * can access system memory. */
3010 armv7a->memory_ap_available = false;
3011 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
3012 if (retval == ERROR_OK) {
3013 retval = mem_ap_init(armv7a->memory_ap);
3014 if (retval == ERROR_OK)
3015 armv7a->memory_ap_available = true;
3016 }
3017 if (retval != ERROR_OK) {
3018 /* AHB-AP not found or unavailable - use the CPU */
3019 LOG_DEBUG("No AHB-AP available for memory access");
3020 }
3021
3022 if (!target->dbgbase_set) {
3023 uint32_t dbgbase;
3024 /* Get ROM Table base */
3025 uint32_t apid;
3026 int32_t coreidx = target->coreid;
3027 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
3028 target->cmd_name);
3029 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
3030 if (retval != ERROR_OK)
3031 return retval;
3032 /* Lookup 0x15 -- Processor DAP */
3033 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
3034 &armv7a->debug_base, &coreidx);
3035 if (retval != ERROR_OK) {
3036 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
3037 target->cmd_name);
3038 return retval;
3039 }
3040 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
3041 target->coreid, armv7a->debug_base);
3042 } else
3043 armv7a->debug_base = target->dbgbase;
3044
3045 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3046 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
3047 if (retval != ERROR_OK)
3048 return retval;
3049
3050 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3051 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
3052 if (retval != ERROR_OK) {
3053 LOG_DEBUG("Examine %s failed", "CPUID");
3054 return retval;
3055 }
3056
3057 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3058 armv7a->debug_base + CPUDBG_CTYPR, &ctypr);
3059 if (retval != ERROR_OK) {
3060 LOG_DEBUG("Examine %s failed", "CTYPR");
3061 return retval;
3062 }
3063
3064 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3065 armv7a->debug_base + CPUDBG_TTYPR, &ttypr);
3066 if (retval != ERROR_OK) {
3067 LOG_DEBUG("Examine %s failed", "TTYPR");
3068 return retval;
3069 }
3070
3071 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3072 armv7a->debug_base + CPUDBG_DIDR, &didr);
3073 if (retval != ERROR_OK) {
3074 LOG_DEBUG("Examine %s failed", "DIDR");
3075 return retval;
3076 }
3077
3078 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
3079 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
3080 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
3081 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
3082
3083 cortex_a->cpuid = cpuid;
3084 cortex_a->ctypr = ctypr;
3085 cortex_a->ttypr = ttypr;
3086 cortex_a->didr = didr;
3087
3088 /* Unlocking the debug registers */
3089 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
3090 CORTEX_A15_PARTNUM) {
3091
3092 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
3093 armv7a->debug_base + CPUDBG_OSLAR,
3094 0);
3095
3096 if (retval != ERROR_OK)
3097 return retval;
3098
3099 }
3100 /* Unlocking the debug registers */
3101 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
3102 CORTEX_A7_PARTNUM) {
3103
3104 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
3105 armv7a->debug_base + CPUDBG_OSLAR,
3106 0);
3107
3108 if (retval != ERROR_OK)
3109 return retval;
3110
3111 }
3112 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3113 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
3114
3115 if (retval != ERROR_OK)
3116 return retval;
3117
3118 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
3119
3120 armv7a->arm.core_type = ARM_MODE_MON;
3121
3122 /* Avoid recreating the registers cache */
3123 if (!target_was_examined(target)) {
3124 retval = cortex_a_dpm_setup(cortex_a, didr);
3125 if (retval != ERROR_OK)
3126 return retval;
3127 }
3128
3129 /* Setup Breakpoint Register Pairs */
3130 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3131 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3132 cortex_a->brp_num_available = cortex_a->brp_num;
3133 free(cortex_a->brp_list);
3134 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3135 /* cortex_a->brb_enabled = ????; */
3136 for (i = 0; i < cortex_a->brp_num; i++) {
3137 cortex_a->brp_list[i].used = 0;
3138 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3139 cortex_a->brp_list[i].type = BRP_NORMAL;
3140 else
3141 cortex_a->brp_list[i].type = BRP_CONTEXT;
3142 cortex_a->brp_list[i].value = 0;
3143 cortex_a->brp_list[i].control = 0;
3144 cortex_a->brp_list[i].BRPn = i;
3145 }
3146
3147 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3148
3149 /* select debug_ap as default */
3150 swjdp->apsel = armv7a->debug_ap->ap_num;
3151
3152 target_set_examined(target);
3153 return ERROR_OK;
3154 }
3155
3156 static int cortex_a_examine(struct target *target)
3157 {
3158 int retval = ERROR_OK;
3159
3160 /* Reestablish communication after target reset */
3161 retval = cortex_a_examine_first(target);
3162
3163 /* Configure core debug access */
3164 if (retval == ERROR_OK)
3165 retval = cortex_a_init_debug_access(target);
3166
3167 return retval;
3168 }
3169
3170 /*
3171 * Cortex-A target creation and initialization
3172 */
3173
/* Post-create initialization hook.  Intentionally a no-op: all the real
 * setup needs DAP access and therefore lives in examine_first(). */
static int cortex_a_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
3180
/* Populate the cortex_a/armv7a arch-info for a freshly created target and
 * hook up the architecture-specific callbacks.  Shared by the Cortex-A and
 * Cortex-R4 target_create() paths; examine_first() finishes the job once
 * the DAP is reachable. */
static int cortex_a_init_arch_info(struct target *target,
	struct cortex_a_common *cortex_a, struct jtag_tap *tap)
{
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;

	/* Setup struct cortex_a_common */
	cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;

	/* tap has no dap initialized */
	if (!tap->dap) {
		/* NOTE(review): dap_init() result is not NULL-checked before
		 * the dereference below — confirm it cannot fail here. */
		tap->dap = dap_init();

		/* Leave (only) generic DAP stuff for debugport_init() */
		tap->dap->tap = tap;
	}

	armv7a->arm.dap = tap->dap;

	cortex_a->fast_reg_read = 0;

	/* register arch-specific functions */
	armv7a->examine_debug_reason = NULL;

	armv7a->post_debug_entry = cortex_a_post_debug_entry;

	armv7a->pre_restore_context = NULL;

	armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;


	/* arm7_9->handle_target_request = cortex_a_handle_target_request; */

	/* REVISIT v7a setup should be in a v7a-specific routine */
	armv7a_init_arch_info(target, armv7a);
	/* Poll the debug channel for firmware messages once per ms. */
	target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);

	return ERROR_OK;
}
3219
3220 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3221 {
3222 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3223
3224 cortex_a->armv7a_common.is_armv7r = false;
3225
3226 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3227 }
3228
3229 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3230 {
3231 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3232
3233 cortex_a->armv7a_common.is_armv7r = true;
3234
3235 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3236 }
3237
3238 static void cortex_a_deinit_target(struct target *target)
3239 {
3240 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3241 struct arm_dpm *dpm = &cortex_a->armv7a_common.dpm;
3242
3243 free(cortex_a->brp_list);
3244 free(dpm->dbp);
3245 free(dpm->dwp);
3246 free(cortex_a);
3247 }
3248
3249 static int cortex_a_mmu(struct target *target, int *enabled)
3250 {
3251 struct armv7a_common *armv7a = target_to_armv7a(target);
3252
3253 if (target->state != TARGET_HALTED) {
3254 LOG_ERROR("%s: target not halted", __func__);
3255 return ERROR_TARGET_INVALID;
3256 }
3257
3258 if (armv7a->is_armv7r)
3259 *enabled = 0;
3260 else
3261 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3262
3263 return ERROR_OK;
3264 }
3265
3266 static int cortex_a_virt2phys(struct target *target,
3267 uint32_t virt, uint32_t *phys)
3268 {
3269 int retval = ERROR_FAIL;
3270 struct armv7a_common *armv7a = target_to_armv7a(target);
3271 struct adiv5_dap *swjdp = armv7a->arm.dap;
3272 uint8_t apsel = swjdp->apsel;
3273 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num)) {
3274 uint32_t ret;
3275 retval = armv7a_mmu_translate_va(target,
3276 virt, &ret);
3277 if (retval != ERROR_OK)
3278 goto done;
3279 *phys = ret;
3280 } else {/* use this method if armv7a->memory_ap not selected
3281 * mmu must be enable in order to get a correct translation */
3282 retval = cortex_a_mmu_modify(target, 1);
3283 if (retval != ERROR_OK)
3284 goto done;
3285 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
3286 }
3287 done:
3288 return retval;
3289 }
3290
3291 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3292 {
3293 struct target *target = get_current_target(CMD_CTX);
3294 struct armv7a_common *armv7a = target_to_armv7a(target);
3295
3296 return armv7a_handle_cache_info_command(CMD_CTX,
3297 &armv7a->armv7a_mmu.armv7a_cache);
3298 }
3299
3300
3301 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3302 {
3303 struct target *target = get_current_target(CMD_CTX);
3304 if (!target_was_examined(target)) {
3305 LOG_ERROR("target not examined yet");
3306 return ERROR_FAIL;
3307 }
3308
3309 return cortex_a_init_debug_access(target);
3310 }
3311 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
3312 {
3313 struct target *target = get_current_target(CMD_CTX);
3314 /* check target is an smp target */
3315 struct target_list *head;
3316 struct target *curr;
3317 head = target->head;
3318 target->smp = 0;
3319 if (head != (struct target_list *)NULL) {
3320 while (head != (struct target_list *)NULL) {
3321 curr = head->target;
3322 curr->smp = 0;
3323 head = head->next;
3324 }
3325 /* fixes the target display to the debugger */
3326 target->gdb_service->target = target;
3327 }
3328 return ERROR_OK;
3329 }
3330
3331 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
3332 {
3333 struct target *target = get_current_target(CMD_CTX);
3334 struct target_list *head;
3335 struct target *curr;
3336 head = target->head;
3337 if (head != (struct target_list *)NULL) {
3338 target->smp = 1;
3339 while (head != (struct target_list *)NULL) {
3340 curr = head->target;
3341 curr->smp = 1;
3342 head = head->next;
3343 }
3344 }
3345 return ERROR_OK;
3346 }
3347
/* "cortex_a smp_gdb": with an argument, pin the core id gdb should talk
 * to (stored in gdb_service->core[1]); always report the current
 * core[0] -> core[1] mapping afterwards.  Only meaningful on targets
 * that belong to an SMP group (non-NULL head). */
COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
{
	struct target *target = get_current_target(CMD_CTX);
	int retval = ERROR_OK;
	struct target_list *head;
	head = target->head;
	if (head != (struct target_list *)NULL) {
		if (CMD_ARGC == 1) {
			int coreid = 0;
			/* COMMAND_PARSE_NUMBER sets retval (and may return)
			 * on a malformed argument */
			COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
			if (ERROR_OK != retval)
				return retval;
			target->gdb_service->core[1] = coreid;

		}
		command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
			, target->gdb_service->core[1]);
	}
	return ERROR_OK;
}
3368
3369 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3370 {
3371 struct target *target = get_current_target(CMD_CTX);
3372 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3373
3374 static const Jim_Nvp nvp_maskisr_modes[] = {
3375 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3376 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3377 { .name = NULL, .value = -1 },
3378 };
3379 const Jim_Nvp *n;
3380
3381 if (CMD_ARGC > 0) {
3382 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3383 if (n->name == NULL) {
3384 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3385 return ERROR_COMMAND_SYNTAX_ERROR;
3386 }
3387
3388 cortex_a->isrmasking_mode = n->value;
3389 }
3390
3391 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3392 command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
3393
3394 return ERROR_OK;
3395 }
3396
3397 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3398 {
3399 struct target *target = get_current_target(CMD_CTX);
3400 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3401
3402 static const Jim_Nvp nvp_dacrfixup_modes[] = {
3403 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3404 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3405 { .name = NULL, .value = -1 },
3406 };
3407 const Jim_Nvp *n;
3408
3409 if (CMD_ARGC > 0) {
3410 n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3411 if (n->name == NULL)
3412 return ERROR_COMMAND_SYNTAX_ERROR;
3413 cortex_a->dacrfixup_mode = n->value;
3414
3415 }
3416
3417 n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3418 command_print(CMD_CTX, "cortex_a domain access control fixup %s", n->name);
3419
3420 return ERROR_OK;
3421 }
3422
3423 static const struct command_registration cortex_a_exec_command_handlers[] = {
3424 {
3425 .name = "cache_info",
3426 .handler = cortex_a_handle_cache_info_command,
3427 .mode = COMMAND_EXEC,
3428 .help = "display information about target caches",
3429 .usage = "",
3430 },
3431 {
3432 .name = "dbginit",
3433 .handler = cortex_a_handle_dbginit_command,
3434 .mode = COMMAND_EXEC,
3435 .help = "Initialize core debug",
3436 .usage = "",
3437 },
3438 { .name = "smp_off",
3439 .handler = cortex_a_handle_smp_off_command,
3440 .mode = COMMAND_EXEC,
3441 .help = "Stop smp handling",
3442 .usage = "",},
3443 {
3444 .name = "smp_on",
3445 .handler = cortex_a_handle_smp_on_command,
3446 .mode = COMMAND_EXEC,
3447 .help = "Restart smp handling",
3448 .usage = "",
3449 },
3450 {
3451 .name = "smp_gdb",
3452 .handler = cortex_a_handle_smp_gdb_command,
3453 .mode = COMMAND_EXEC,
3454 .help = "display/fix current core played to gdb",
3455 .usage = "",
3456 },
3457 {
3458 .name = "maskisr",
3459 .handler = handle_cortex_a_mask_interrupts_command,
3460 .mode = COMMAND_ANY,
3461 .help = "mask cortex_a interrupts",
3462 .usage = "['on'|'off']",
3463 },
3464 {
3465 .name = "dacrfixup",
3466 .handler = handle_cortex_a_dacrfixup_command,
3467 .mode = COMMAND_EXEC,
3468 .help = "set domain access control (DACR) to all-manager "
3469 "on memory access",
3470 .usage = "['on'|'off']",
3471 },
3472
3473 COMMAND_REGISTRATION_DONE
3474 };
/* Top-level "cortex_a" command group: chains in the generic ARM and
 * ARMv7-A command sets plus the Cortex-A specific handlers. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3491
/* Target driver for Cortex-A (ARMv7-A) cores: includes buffered and
 * physical memory access plus MMU translation hooks. */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	/* alignment-aware bulk transfers over the AHB-AP */
	.read_buffer = cortex_a_read_buffer,
	.write_buffer = cortex_a_write_buffer,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* watchpoints not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,

	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
3538
/* Cortex-R4 specific subcommands ("cortex_r4 ...") — a subset of the
 * Cortex-A commands (no SMP or DACR-fixup handling). */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_r4 interrupts",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level "cortex_r4" command group: chains in the generic ARM and
 * ARMv7-A command sets plus the Cortex-R4 specific handlers. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3580
/* Target driver for Cortex-R4 (ARMv7-R) cores.  Unlike cortexa_target it
 * registers no buffered-transfer, physical-memory, mmu or virt2phys
 * hooks (no MMU on ARMv7-R). */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* watchpoints not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account. Then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will allow linking the new login method. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)