semihosting armv7a: Add support for ARMv7-A
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex-R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 * *
39 * Cortex-A8(tm) TRM, ARM DDI 0344H *
40 * Cortex-A9(tm) TRM, ARM DDI 0407F *
41 * Cortex-R4(tm) TRM, ARM DDI 0363E *
42 * Cortex-A15(tm)TRM, ARM DDI 0438C *
43 * *
44 ***************************************************************************/
45
46 #ifdef HAVE_CONFIG_H
47 #include "config.h"
48 #endif
49
50 #include "breakpoints.h"
51 #include "cortex_a.h"
52 #include "register.h"
53 #include "target_request.h"
54 #include "target_type.h"
55 #include "arm_opcodes.h"
56 #include "arm_semihosting.h"
57 #include <helper/time_support.h>
58
59 static int cortex_a_poll(struct target *target);
60 static int cortex_a_debug_entry(struct target *target);
61 static int cortex_a_restore_context(struct target *target, bool bpwp);
62 static int cortex_a_set_breakpoint(struct target *target,
63 struct breakpoint *breakpoint, uint8_t matchmode);
64 static int cortex_a_set_context_breakpoint(struct target *target,
65 struct breakpoint *breakpoint, uint8_t matchmode);
66 static int cortex_a_set_hybrid_breakpoint(struct target *target,
67 struct breakpoint *breakpoint);
68 static int cortex_a_unset_breakpoint(struct target *target,
69 struct breakpoint *breakpoint);
70 static int cortex_a_dap_read_coreregister_u32(struct target *target,
71 uint32_t *value, int regnum);
72 static int cortex_a_dap_write_coreregister_u32(struct target *target,
73 uint32_t value, int regnum);
74 static int cortex_a_mmu(struct target *target, int *enabled);
75 static int cortex_a_mmu_modify(struct target *target, int enable);
76 static int cortex_a_virt2phys(struct target *target,
77 uint32_t virt, uint32_t *phys);
78 static int cortex_a_read_cpu_memory(struct target *target,
79 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
80
81
82 /* restore cp15_control_reg at resume */
83 static int cortex_a_restore_cp15_control_reg(struct target *target)
84 {
85 int retval = ERROR_OK;
86 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
87 struct armv7a_common *armv7a = target_to_armv7a(target);
88
89 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
90 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
91 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
92 retval = armv7a->arm.mcr(target, 15,
93 0, 0, /* op1, op2 */
94 1, 0, /* CRn, CRm */
95 cortex_a->cp15_control_reg);
96 }
97 return retval;
98 }
99
100 /*
101 * Set up ARM core for memory access.
102 * If !phys_access, switch to SVC mode and make sure MMU is on
103 * If phys_access, switch off mmu
104 */
static int cortex_a_prep_memaccess(struct target *target, int phys_access)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	int mmu_enabled = 0;

	if (phys_access == 0) {
		/* Virtual access: run in SVC mode so the access uses a
		 * privileged view of memory. */
		dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
		/* Re-enable translation only if the MMU was on when the target
		 * stopped; cortex_a_mmu_modify() refuses to enable it otherwise. */
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 1);
		if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
			/* overwrite DACR to all-manager */
			armv7a->arm.mcr(target, 15,
					0, 0, 3, 0,
					0xFFFFFFFF);
		}
	} else {
		/* Physical access: switch the MMU off so addresses are
		 * not translated. */
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 0);
	}
	return ERROR_OK;
}
129
130 /*
131 * Restore ARM core after memory access.
132 * If !phys_access, switch to previous mode
133 * If phys_access, restore MMU setting
134 */
static int cortex_a_post_memaccess(struct target *target, int phys_access)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	if (phys_access == 0) {
		if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
			/* restore the DACR value saved before the all-manager
			 * override done in cortex_a_prep_memaccess() */
			armv7a->arm.mcr(target, 15,
					0, 0, 3, 0,
					cortex_a->cp15_dacr_reg);
		}
		/* drop the SVC override and return to the previous mode */
		dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	} else {
		/* physical access forced the MMU off; turn it back on if it
		 * was enabled when the target stopped */
		int mmu_enabled = 0;
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 1);
	}
	return ERROR_OK;
}
156
157
158 /* modify cp15_control_reg in order to enable or disable mmu for :
159 * - virt2phys address conversion
160 * - read or write memory in phys or virt address */
161 static int cortex_a_mmu_modify(struct target *target, int enable)
162 {
163 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
164 struct armv7a_common *armv7a = target_to_armv7a(target);
165 int retval = ERROR_OK;
166 int need_write = 0;
167
168 if (enable) {
169 /* if mmu enabled at target stop and mmu not enable */
170 if (!(cortex_a->cp15_control_reg & 0x1U)) {
171 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
172 return ERROR_FAIL;
173 }
174 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
175 cortex_a->cp15_control_reg_curr |= 0x1U;
176 need_write = 1;
177 }
178 } else {
179 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
180 cortex_a->cp15_control_reg_curr &= ~0x1U;
181 need_write = 1;
182 }
183 }
184
185 if (need_write) {
186 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
187 enable ? "enable mmu" : "disable mmu",
188 cortex_a->cp15_control_reg_curr);
189
190 retval = armv7a->arm.mcr(target, 15,
191 0, 0, /* op1, op2 */
192 1, 0, /* CRn, CRm */
193 cortex_a->cp15_control_reg_curr);
194 }
195 return retval;
196 }
197
198 /*
199 * Cortex-A Basic debug access, very low level assumes state is saved
200 */
201 static int cortex_a8_init_debug_access(struct target *target)
202 {
203 struct armv7a_common *armv7a = target_to_armv7a(target);
204 int retval;
205
206 LOG_DEBUG(" ");
207
208 /* Unlocking the debug registers for modification
209 * The debugport might be uninitialised so try twice */
210 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
211 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
212 if (retval != ERROR_OK) {
213 /* try again */
214 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
215 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
216 if (retval == ERROR_OK)
217 LOG_USER(
218 "Locking debug access failed on first, but succeeded on second try.");
219 }
220
221 return retval;
222 }
223
224 /*
225 * Cortex-A Basic debug access, very low level assumes state is saved
226 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;
	uint32_t dbg_osreg;
	uint32_t cortex_part_num;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	LOG_DEBUG(" ");
	/* The MIDR part number selects which unlock mechanism applies. */
	cortex_part_num = (cortex_a->cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >>
		CORTEX_A_MIDR_PARTNUM_SHIFT;

	switch (cortex_part_num) {
	case CORTEX_A7_PARTNUM:
	case CORTEX_A15_PARTNUM:
		/* A7/A15: check the OS Lock Status register and clear the
		 * lock through DBGOSLAR if it is set. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
						armv7a->debug_base + CPUDBG_OSLSR,
						&dbg_osreg);
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("DBGOSLSR 0x%" PRIx32, dbg_osreg);

		if (dbg_osreg & CPUDBG_OSLAR_LK_MASK)
			/* Unlocking the DEBUG OS registers for modification */
			retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
							 armv7a->debug_base + CPUDBG_OSLAR,
							 0);
		break;

	case CORTEX_A5_PARTNUM:
	case CORTEX_A8_PARTNUM:
	case CORTEX_A9_PARTNUM:
	default:
		/* older parts: unlock via the software lock (DBGLAR) */
		retval = cortex_a8_init_debug_access(target);
	}

	if (retval != ERROR_OK)
		return retval;
	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
	LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);

	if (retval != ERROR_OK)
		return retval;

	/* Disable cacheline fills and force cache write-through in debug state */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable TLB lookup and refill/eviction in debug state */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSMCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a_poll(target);
}
294
static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
{
	/* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
	 * Writes final value of DSCR into *dscr. Pass force to force always
	 * reading DSCR at least once.
	 * NOTE: *dscr is dereferenced unconditionally — callers must pass a
	 * valid pointer, typically seeded with the last known DSCR value. */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int64_t then = timeval_ms();
	while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
		force = false;	/* 'force' only guarantees one extra read */
		int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		/* one-second timeout guards against a wedged core */
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for InstrCompl=1");
			return ERROR_FAIL;
		}
	}
	return ERROR_OK;
}
317
318 /* To reduce needless round-trips, pass in a pointer to the current
319 * DSCR value. Initialize it to zero if you just need to know the
320 * value on return from this function; or DSCR_INSTR_COMP if you
321 * happen to know that no instruction is pending.
322 */
323 static int cortex_a_exec_opcode(struct target *target,
324 uint32_t opcode, uint32_t *dscr_p)
325 {
326 uint32_t dscr;
327 int retval;
328 struct armv7a_common *armv7a = target_to_armv7a(target);
329
330 dscr = dscr_p ? *dscr_p : 0;
331
332 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
333
334 /* Wait for InstrCompl bit to be set */
335 retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
336 if (retval != ERROR_OK)
337 return retval;
338
339 retval = mem_ap_write_u32(armv7a->debug_ap,
340 armv7a->debug_base + CPUDBG_ITR, opcode);
341 if (retval != ERROR_OK)
342 return retval;
343
344 int64_t then = timeval_ms();
345 do {
346 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
347 armv7a->debug_base + CPUDBG_DSCR, &dscr);
348 if (retval != ERROR_OK) {
349 LOG_ERROR("Could not read DSCR register");
350 return retval;
351 }
352 if (timeval_ms() > then + 1000) {
353 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
354 return ERROR_FAIL;
355 }
356 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
357
358 if (dscr_p)
359 *dscr_p = dscr;
360
361 return retval;
362 }
363
364 /**************************************************************************
365 Read core register with very few exec_opcode, fast but needs work_area.
366 This can cause problems with MMU active.
367 **************************************************************************/
static int cortex_a_read_regs_through_mem(struct target *target, uint32_t address,
	uint32_t *regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* regfile[0] = r0, read directly over the DCC */
	retval = cortex_a_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	/* point r0 at the work area */
	retval = cortex_a_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	/* STMIA r0, {r1-r15}: register list mask 0xFFFE stores r1..r15
	 * to memory at [r0] in one instruction */
	retval = cortex_a_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;

	/* read the 15 stored words back into regfile[1..15] through the
	 * memory AP */
	retval = mem_ap_read_buf(armv7a->memory_ap,
			(uint8_t *)(&regfile[1]), 4, 15, address);

	return retval;
}
389
/* Read one core register (r0-r14 = 0-14, PC = 15, CPSR = 16, SPSR = 17)
 * by making the core push it into the DCC, then reading DTRTX. */
static int cortex_a_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* out-of-range register numbers are silently ignored (ERROR_OK) */
	if (reg > 17)
		return retval;

	if (reg < 15) {
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else if (reg == 15) {
		/* "MOV r0, r15"; then move r0 to DCCTX
		 * NOTE: clobbers r0; callers must have saved it already */
		retval = cortex_a_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* "MRS r0, CPSR" (reg 16) or "MRS r0, SPSR" (reg 17)
		 * then move r0 to DCCTX
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX */
	int64_t then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
451
/* Write one core register (r0-r14 = 0-14, PC = 15, CPSR = 16, SPSR = 17)
 * by loading the value into DTRRX and making the core pull it out. */
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* out-of-range register numbers are silently ignored (ERROR_OK) */
	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15) {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	} else if (Rd == 15) {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 * NOTE: clobbers r0; callers must restore it afterwards */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16) {
			retval = cortex_a_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
529
530 /* Write to memory mapped registers directly with no cache or mmu handling */
531 static int cortex_a_dap_write_memap_register_u32(struct target *target,
532 uint32_t address,
533 uint32_t value)
534 {
535 int retval;
536 struct armv7a_common *armv7a = target_to_armv7a(target);
537
538 retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
539
540 return retval;
541 }
542
543 /*
544 * Cortex-A implementation of Debug Programmer's Model
545 *
546 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
547 * so there's no need to poll for it before executing an instruction.
548 *
549 * NOTE that in several of these cases the "stall" mode might be useful.
550 * It'd let us queue a few operations together... prepare/finish might
551 * be the places to enable/disable that mode.
552 */
553
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	/* The arm_dpm is embedded in armv7a_common, itself embedded in
	 * cortex_a_common; walk back out to the enclosing object. */
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
558
559 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
560 {
561 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
562 return mem_ap_write_u32(a->armv7a_common.debug_ap,
563 a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
564 }
565
/* Read a word the core has pushed into the DCC (DTRTX), waiting for
 * DSCR.DTRTXfull first. *dscr_p, when given, seeds and receives the
 * cached DSCR value to save round-trips. */
static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	int64_t then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		/* one-second timeout guards against a wedged core */
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
600
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after ever DPM operation */
	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by draining it into r0 (MRC p14, 0, r0, c0, c5, 0) */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
637
638 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
639 {
640 /* REVISIT what could be done here? */
641 return ERROR_OK;
642 }
643
644 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
645 uint32_t opcode, uint32_t data)
646 {
647 struct cortex_a_common *a = dpm_to_a(dpm);
648 int retval;
649 uint32_t dscr = DSCR_INSTR_COMP;
650
651 retval = cortex_a_write_dcc(a, data);
652 if (retval != ERROR_OK)
653 return retval;
654
655 return cortex_a_exec_opcode(
656 a->armv7a_common.arm.target,
657 opcode,
658 &dscr);
659 }
660
static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* stage the operand in the DCC */
	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
	 * (the original comment said MCR/0xEE000E15; DCC-to-core is MRC) */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);

	return retval;
}
688
689 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
690 {
691 struct target *target = dpm->arm->target;
692 uint32_t dscr = DSCR_INSTR_COMP;
693
694 /* "Prefetch flush" after modifying execution status in CPSR */
695 return cortex_a_exec_opcode(target,
696 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
697 &dscr);
698 }
699
700 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
701 uint32_t opcode, uint32_t *data)
702 {
703 struct cortex_a_common *a = dpm_to_a(dpm);
704 int retval;
705 uint32_t dscr = DSCR_INSTR_COMP;
706
707 /* the opcode, writing data to DCC */
708 retval = cortex_a_exec_opcode(
709 a->armv7a_common.arm.target,
710 opcode,
711 &dscr);
712 if (retval != ERROR_OK)
713 return retval;
714
715 return cortex_a_read_dcc(a, data, &dscr);
716 }
717
718
static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* the opcode, writing data to R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC: "MCR p14, 0, r0, c0, c5, 0" */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* and pull the value back over the debug AP */
	return cortex_a_read_dcc(a, data, &dscr);
}
744
745 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
746 uint32_t addr, uint32_t control)
747 {
748 struct cortex_a_common *a = dpm_to_a(dpm);
749 uint32_t vr = a->armv7a_common.debug_base;
750 uint32_t cr = a->armv7a_common.debug_base;
751 int retval;
752
753 switch (index_t) {
754 case 0 ... 15: /* breakpoints */
755 vr += CPUDBG_BVR_BASE;
756 cr += CPUDBG_BCR_BASE;
757 break;
758 case 16 ... 31: /* watchpoints */
759 vr += CPUDBG_WVR_BASE;
760 cr += CPUDBG_WCR_BASE;
761 index_t -= 16;
762 break;
763 default:
764 return ERROR_FAIL;
765 }
766 vr += 4 * index_t;
767 cr += 4 * index_t;
768
769 LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
770 (unsigned) vr, (unsigned) cr);
771
772 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
773 vr, addr);
774 if (retval != ERROR_OK)
775 return retval;
776 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
777 cr, control);
778 return retval;
779 }
780
781 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
782 {
783 struct cortex_a_common *a = dpm_to_a(dpm);
784 uint32_t cr;
785
786 switch (index_t) {
787 case 0 ... 15:
788 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
789 break;
790 case 16 ... 31:
791 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
792 index_t -= 16;
793 break;
794 default:
795 return ERROR_FAIL;
796 }
797 cr += 4 * index_t;
798
799 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
800
801 /* clear control register */
802 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
803 }
804
static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
{
	struct arm_dpm *dpm = &a->armv7a_common.dpm;
	int retval;

	dpm->arm = &a->armv7a_common.arm;
	dpm->didr = didr;

	/* hook up the Cortex-A implementations of the DPM operations */
	dpm->prepare = cortex_a_dpm_prepare;
	dpm->finish = cortex_a_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a_bpwp_enable;
	dpm->bpwp_disable = cortex_a_bpwp_disable;

	/* generic setup, then initialize (discovers bp/wp resources etc.) */
	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
832 static struct target *get_cortex_a(struct target *target, int32_t coreid)
833 {
834 struct target_list *head;
835 struct target *curr;
836
837 head = target->head;
838 while (head != (struct target_list *)NULL) {
839 curr = head->target;
840 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
841 return curr;
842 head = head->next;
843 }
844 return target;
845 }
846 static int cortex_a_halt(struct target *target);
847
848 static int cortex_a_halt_smp(struct target *target)
849 {
850 int retval = 0;
851 struct target_list *head;
852 struct target *curr;
853 head = target->head;
854 while (head != (struct target_list *)NULL) {
855 curr = head->target;
856 if ((curr != target) && (curr->state != TARGET_HALTED))
857 retval += cortex_a_halt(curr);
858 head = head->next;
859 }
860 return retval;
861 }
862
863 static int update_halt_gdb(struct target *target)
864 {
865 int retval = 0;
866 if (target->gdb_service && target->gdb_service->core[0] == -1) {
867 target->gdb_service->target = target;
868 target->gdb_service->core[0] = target->coreid;
869 retval += cortex_a_halt_smp(target);
870 }
871 return retval;
872 }
873
874 /*
875 * Cortex-A Run control
876 */
877
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	/* read and cache DSCR; its mode bits tell us the run state */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				/* a semihosting request consumes the halt:
				 * no HALTED event is sent to the frontend */
				if (arm_semihosting(target, &retval) != 0)
					return retval;

				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
951
static int cortex_a_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enter halting debug mode
	 */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* read-modify-write: only the halting-debug-mode bit is added */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	/* poll DSCR until the core reports halted, with 1 s timeout */
	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_HALTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	/* record that this halt was requested by the debugger */
	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
998
/* Prepare the core to resume: compute and write back the resume PC,
 * restore CP15 control and the register context, and mark the target
 * running. Does not actually restart the core (see internal_restart).
 * current = 1: continue at the current PC; otherwise resume at *address.
 * On return *address holds the PC actually used. */
static int cortex_a_internal_restore(struct target *target, int current,
	uint32_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM instructions are word-aligned */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
1093
/* Issue the actual restart request to a core previously prepared by
 * cortex_a_internal_restore().
 *
 * Disables ITR, writes DRCR restart + clear-sticky-exceptions, then
 * polls DSCR until the core reports "restarted" (1 s timeout).  On
 * success the target state is set to RUNNING and the register cache is
 * invalidated.  Returns ERROR_OK or an OpenOCD error code.
 */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started.  Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* a pending ITR instruction must have completed before restarting */
	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* wait up to 1 s for the core to acknowledge the restart */
	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1149
1150 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
1151 {
1152 int retval = 0;
1153 struct target_list *head;
1154 struct target *curr;
1155 uint32_t address;
1156 head = target->head;
1157 while (head != (struct target_list *)NULL) {
1158 curr = head->target;
1159 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1160 /* resume current address , not in step mode */
1161 retval += cortex_a_internal_restore(curr, 1, &address,
1162 handle_breakpoints, 0);
1163 retval += cortex_a_internal_restart(curr);
1164 }
1165 head = head->next;
1166
1167 }
1168 return retval;
1169 }
1170
1171 static int cortex_a_resume(struct target *target, int current,
1172 uint32_t address, int handle_breakpoints, int debug_execution)
1173 {
1174 int retval = 0;
1175 /* dummy resume for smp toggle in order to reduce gdb impact */
1176 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1177 /* simulate a start and halt of target */
1178 target->gdb_service->target = NULL;
1179 target->gdb_service->core[0] = target->gdb_service->core[1];
1180 /* fake resume at next poll we play the target core[1], see poll*/
1181 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1182 return 0;
1183 }
1184 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1185 if (target->smp) {
1186 target->gdb_service->core[0] = -1;
1187 retval = cortex_a_restore_smp(target, handle_breakpoints);
1188 if (retval != ERROR_OK)
1189 return retval;
1190 }
1191 cortex_a_internal_restart(target);
1192
1193 if (!debug_execution) {
1194 target->state = TARGET_RUNNING;
1195 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1196 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1197 } else {
1198 target->state = TARGET_DEBUG_RUNNING;
1199 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1200 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1201 }
1202
1203 return ERROR_OK;
1204 }
1205
/* Handle entry into debug state: enable ITR, report the debug reason,
 * read the core registers (via DPM or, if a working area is available,
 * through memory), fix up the cached PC for the pipeline offset, read
 * SPSR, and finally invoke the optional post_debug_entry hook.
 * Returns ERROR_OK or an OpenOCD error code.
 */
static int cortex_a_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, spsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);


	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
		/* normal path: read registers one by one through the DPM */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	else {
		/* fast path: dump r0..r15 into the working area, then read it */
		retval = cortex_a_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
			return retval;

		/* read Current PSR */
		retval = cortex_a_dap_read_coreregister_u32(target, &cpsr, 16);
		/* store current cpsr */
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(arm, cpsr);

		/* update cache */
		for (i = 0; i <= ARM_PC; i++) {
			reg = arm_reg_current(arm, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: the sampled PC is ahead of the
		 * instruction that caused the debug entry by the pipeline
		 * offset (4 in Thumb/ThumbEE, 8 in ARM state). */
		if (cpsr & (1 << 5)) {
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		} else {
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = arm->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

	/* read Saved PSR */
	retval = cortex_a_dap_read_coreregister_u32(target, &spsr, 17);
	/* store current spsr */
	if (retval != ERROR_OK)
		return retval;

	reg = arm->spsr;
	buf_set_u32(reg->value, 0, 32, spsr);
	reg->valid = 1;
	reg->dirty = 0;

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1337
1338 static int cortex_a_post_debug_entry(struct target *target)
1339 {
1340 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1341 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1342 int retval;
1343
1344 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1345 retval = armv7a->arm.mrc(target, 15,
1346 0, 0, /* op1, op2 */
1347 1, 0, /* CRn, CRm */
1348 &cortex_a->cp15_control_reg);
1349 if (retval != ERROR_OK)
1350 return retval;
1351 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1352 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1353
1354 if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1355 armv7a_identify_cache(target);
1356
1357 if (armv7a->is_armv7r) {
1358 armv7a->armv7a_mmu.mmu_enabled = 0;
1359 } else {
1360 armv7a->armv7a_mmu.mmu_enabled =
1361 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1362 }
1363 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1364 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1365 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1366 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1367 cortex_a->curr_mode = armv7a->arm.core_mode;
1368
1369 /* switch to SVC mode to read DACR */
1370 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1371 armv7a->arm.mrc(target, 15,
1372 0, 0, 3, 0,
1373 &cortex_a->cp15_dacr_reg);
1374
1375 LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1376 cortex_a->cp15_dacr_reg);
1377
1378 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1379 return ERROR_OK;
1380 }
1381
1382 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1383 {
1384 struct armv7a_common *armv7a = target_to_armv7a(target);
1385 uint32_t dscr;
1386
1387 /* Read DSCR */
1388 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1389 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1390 if (ERROR_OK != retval)
1391 return retval;
1392
1393 /* clear bitfield */
1394 dscr &= ~bit_mask;
1395 /* put new value */
1396 dscr |= value & bit_mask;
1397
1398 /* write new DSCR */
1399 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1400 armv7a->debug_base + CPUDBG_DSCR, dscr);
1401 return retval;
1402 }
1403
1404 static int cortex_a_step(struct target *target, int current, uint32_t address,
1405 int handle_breakpoints)
1406 {
1407 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1408 struct armv7a_common *armv7a = target_to_armv7a(target);
1409 struct arm *arm = &armv7a->arm;
1410 struct breakpoint *breakpoint = NULL;
1411 struct breakpoint stepbreakpoint;
1412 struct reg *r;
1413 int retval;
1414
1415 if (target->state != TARGET_HALTED) {
1416 LOG_WARNING("target not halted");
1417 return ERROR_TARGET_NOT_HALTED;
1418 }
1419
1420 /* current = 1: continue on current pc, otherwise continue at <address> */
1421 r = arm->pc;
1422 if (!current)
1423 buf_set_u32(r->value, 0, 32, address);
1424 else
1425 address = buf_get_u32(r->value, 0, 32);
1426
1427 /* The front-end may request us not to handle breakpoints.
1428 * But since Cortex-A uses breakpoint for single step,
1429 * we MUST handle breakpoints.
1430 */
1431 handle_breakpoints = 1;
1432 if (handle_breakpoints) {
1433 breakpoint = breakpoint_find(target, address);
1434 if (breakpoint)
1435 cortex_a_unset_breakpoint(target, breakpoint);
1436 }
1437
1438 /* Setup single step breakpoint */
1439 stepbreakpoint.address = address;
1440 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1441 ? 2 : 4;
1442 stepbreakpoint.type = BKPT_HARD;
1443 stepbreakpoint.set = 0;
1444
1445 /* Disable interrupts during single step if requested */
1446 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1447 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1448 if (ERROR_OK != retval)
1449 return retval;
1450 }
1451
1452 /* Break on IVA mismatch */
1453 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1454
1455 target->debug_reason = DBG_REASON_SINGLESTEP;
1456
1457 retval = cortex_a_resume(target, 1, address, 0, 0);
1458 if (retval != ERROR_OK)
1459 return retval;
1460
1461 int64_t then = timeval_ms();
1462 while (target->state != TARGET_HALTED) {
1463 retval = cortex_a_poll(target);
1464 if (retval != ERROR_OK)
1465 return retval;
1466 if (timeval_ms() > then + 1000) {
1467 LOG_ERROR("timeout waiting for target halt");
1468 return ERROR_FAIL;
1469 }
1470 }
1471
1472 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1473
1474 /* Re-enable interrupts if they were disabled */
1475 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1476 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1477 if (ERROR_OK != retval)
1478 return retval;
1479 }
1480
1481
1482 target->debug_reason = DBG_REASON_BREAKPOINT;
1483
1484 if (breakpoint)
1485 cortex_a_set_breakpoint(target, breakpoint, 0);
1486
1487 if (target->state != TARGET_HALTED)
1488 LOG_DEBUG("target stepped");
1489
1490 return ERROR_OK;
1491 }
1492
1493 static int cortex_a_restore_context(struct target *target, bool bpwp)
1494 {
1495 struct armv7a_common *armv7a = target_to_armv7a(target);
1496
1497 LOG_DEBUG(" ");
1498
1499 if (armv7a->pre_restore_context)
1500 armv7a->pre_restore_context(target);
1501
1502 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1503 }
1504
1505 /*
1506 * Cortex-A Breakpoint and watchpoint functions
1507 */
1508
1509 /* Setup hardware Breakpoint Register Pair */
1510 static int cortex_a_set_breakpoint(struct target *target,
1511 struct breakpoint *breakpoint, uint8_t matchmode)
1512 {
1513 int retval;
1514 int brp_i = 0;
1515 uint32_t control;
1516 uint8_t byte_addr_select = 0x0F;
1517 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1518 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1519 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1520
1521 if (breakpoint->set) {
1522 LOG_WARNING("breakpoint already set");
1523 return ERROR_OK;
1524 }
1525
1526 if (breakpoint->type == BKPT_HARD) {
1527 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1528 brp_i++;
1529 if (brp_i >= cortex_a->brp_num) {
1530 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1531 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1532 }
1533 breakpoint->set = brp_i + 1;
1534 if (breakpoint->length == 2)
1535 byte_addr_select = (3 << (breakpoint->address & 0x02));
1536 control = ((matchmode & 0x7) << 20)
1537 | (byte_addr_select << 5)
1538 | (3 << 1) | 1;
1539 brp_list[brp_i].used = 1;
1540 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1541 brp_list[brp_i].control = control;
1542 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1543 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1544 brp_list[brp_i].value);
1545 if (retval != ERROR_OK)
1546 return retval;
1547 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1548 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1549 brp_list[brp_i].control);
1550 if (retval != ERROR_OK)
1551 return retval;
1552 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1553 brp_list[brp_i].control,
1554 brp_list[brp_i].value);
1555 } else if (breakpoint->type == BKPT_SOFT) {
1556 uint8_t code[4];
1557 if (breakpoint->length == 2)
1558 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1559 else
1560 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1561 retval = target_read_memory(target,
1562 breakpoint->address & 0xFFFFFFFE,
1563 breakpoint->length, 1,
1564 breakpoint->orig_instr);
1565 if (retval != ERROR_OK)
1566 return retval;
1567
1568 /* make sure data cache is cleaned & invalidated down to PoC */
1569 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1570 armv7a_cache_flush_virt(target, breakpoint->address,
1571 breakpoint->length);
1572 }
1573
1574 retval = target_write_memory(target,
1575 breakpoint->address & 0xFFFFFFFE,
1576 breakpoint->length, 1, code);
1577 if (retval != ERROR_OK)
1578 return retval;
1579
1580 /* update i-cache at breakpoint location */
1581 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1582 breakpoint->length);
1583 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1584 breakpoint->length);
1585
1586 breakpoint->set = 0x11; /* Any nice value but 0 */
1587 }
1588
1589 return ERROR_OK;
1590 }
1591
1592 static int cortex_a_set_context_breakpoint(struct target *target,
1593 struct breakpoint *breakpoint, uint8_t matchmode)
1594 {
1595 int retval = ERROR_FAIL;
1596 int brp_i = 0;
1597 uint32_t control;
1598 uint8_t byte_addr_select = 0x0F;
1599 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1600 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1601 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1602
1603 if (breakpoint->set) {
1604 LOG_WARNING("breakpoint already set");
1605 return retval;
1606 }
1607 /*check available context BRPs*/
1608 while ((brp_list[brp_i].used ||
1609 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1610 brp_i++;
1611
1612 if (brp_i >= cortex_a->brp_num) {
1613 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1614 return ERROR_FAIL;
1615 }
1616
1617 breakpoint->set = brp_i + 1;
1618 control = ((matchmode & 0x7) << 20)
1619 | (byte_addr_select << 5)
1620 | (3 << 1) | 1;
1621 brp_list[brp_i].used = 1;
1622 brp_list[brp_i].value = (breakpoint->asid);
1623 brp_list[brp_i].control = control;
1624 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1625 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1626 brp_list[brp_i].value);
1627 if (retval != ERROR_OK)
1628 return retval;
1629 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1630 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1631 brp_list[brp_i].control);
1632 if (retval != ERROR_OK)
1633 return retval;
1634 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1635 brp_list[brp_i].control,
1636 brp_list[brp_i].value);
1637 return ERROR_OK;
1638
1639 }
1640
1641 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1642 {
1643 int retval = ERROR_FAIL;
1644 int brp_1 = 0; /* holds the contextID pair */
1645 int brp_2 = 0; /* holds the IVA pair */
1646 uint32_t control_CTX, control_IVA;
1647 uint8_t CTX_byte_addr_select = 0x0F;
1648 uint8_t IVA_byte_addr_select = 0x0F;
1649 uint8_t CTX_machmode = 0x03;
1650 uint8_t IVA_machmode = 0x01;
1651 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1652 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1653 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1654
1655 if (breakpoint->set) {
1656 LOG_WARNING("breakpoint already set");
1657 return retval;
1658 }
1659 /*check available context BRPs*/
1660 while ((brp_list[brp_1].used ||
1661 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1662 brp_1++;
1663
1664 printf("brp(CTX) found num: %d\n", brp_1);
1665 if (brp_1 >= cortex_a->brp_num) {
1666 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1667 return ERROR_FAIL;
1668 }
1669
1670 while ((brp_list[brp_2].used ||
1671 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1672 brp_2++;
1673
1674 printf("brp(IVA) found num: %d\n", brp_2);
1675 if (brp_2 >= cortex_a->brp_num) {
1676 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1677 return ERROR_FAIL;
1678 }
1679
1680 breakpoint->set = brp_1 + 1;
1681 breakpoint->linked_BRP = brp_2;
1682 control_CTX = ((CTX_machmode & 0x7) << 20)
1683 | (brp_2 << 16)
1684 | (0 << 14)
1685 | (CTX_byte_addr_select << 5)
1686 | (3 << 1) | 1;
1687 brp_list[brp_1].used = 1;
1688 brp_list[brp_1].value = (breakpoint->asid);
1689 brp_list[brp_1].control = control_CTX;
1690 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1691 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1692 brp_list[brp_1].value);
1693 if (retval != ERROR_OK)
1694 return retval;
1695 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1696 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1697 brp_list[brp_1].control);
1698 if (retval != ERROR_OK)
1699 return retval;
1700
1701 control_IVA = ((IVA_machmode & 0x7) << 20)
1702 | (brp_1 << 16)
1703 | (IVA_byte_addr_select << 5)
1704 | (3 << 1) | 1;
1705 brp_list[brp_2].used = 1;
1706 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1707 brp_list[brp_2].control = control_IVA;
1708 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1709 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1710 brp_list[brp_2].value);
1711 if (retval != ERROR_OK)
1712 return retval;
1713 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1714 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1715 brp_list[brp_2].control);
1716 if (retval != ERROR_OK)
1717 return retval;
1718
1719 return ERROR_OK;
1720 }
1721
/* Remove a previously programmed breakpoint from the target.
 *
 * For BKPT_HARD: clears the BRP (or, for a hybrid breakpoint with both
 * address and asid set, the linked context + IVA BRP pair) and writes
 * the zeroed BCR/BVR back to the debug registers.  For BKPT_SOFT:
 * restores the saved original instruction and maintains cache
 * coherency.  Returns ERROR_OK or an OpenOCD error code.
 */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* hybrid breakpoint: both the context BRP and the linked
		 * IVA BRP must be released */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: release its single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* software breakpoint: put the original instruction back */

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
					breakpoint->length);
		}

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
					breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
					breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1837
1838 static int cortex_a_add_breakpoint(struct target *target,
1839 struct breakpoint *breakpoint)
1840 {
1841 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1842
1843 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1844 LOG_INFO("no hardware breakpoint available");
1845 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1846 }
1847
1848 if (breakpoint->type == BKPT_HARD)
1849 cortex_a->brp_num_available--;
1850
1851 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1852 }
1853
1854 static int cortex_a_add_context_breakpoint(struct target *target,
1855 struct breakpoint *breakpoint)
1856 {
1857 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1858
1859 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1860 LOG_INFO("no hardware breakpoint available");
1861 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1862 }
1863
1864 if (breakpoint->type == BKPT_HARD)
1865 cortex_a->brp_num_available--;
1866
1867 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1868 }
1869
1870 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1871 struct breakpoint *breakpoint)
1872 {
1873 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1874
1875 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1876 LOG_INFO("no hardware breakpoint available");
1877 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1878 }
1879
1880 if (breakpoint->type == BKPT_HARD)
1881 cortex_a->brp_num_available--;
1882
1883 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1884 }
1885
1886
1887 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1888 {
1889 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1890
1891 #if 0
1892 /* It is perfectly possible to remove breakpoints while the target is running */
1893 if (target->state != TARGET_HALTED) {
1894 LOG_WARNING("target not halted");
1895 return ERROR_TARGET_NOT_HALTED;
1896 }
1897 #endif
1898
1899 if (breakpoint->set) {
1900 cortex_a_unset_breakpoint(target, breakpoint);
1901 if (breakpoint->type == BKPT_HARD)
1902 cortex_a->brp_num_available++;
1903 }
1904
1905
1906 return ERROR_OK;
1907 }
1908
1909 /*
1910 * Cortex-A Reset functions
1911 */
1912
/* target_type assert_reset hook: issue a warm reset, preferring a
 * user-supplied RESET_ASSERT event handler over SRST.  Can be called in
 * the "target not examined" state.  Invalidates the register cache and
 * moves the target to TARGET_RESET.
 */
static int cortex_a_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* This function can be called in "target not examined" state */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		/* only assert SRST if the debug link survives it
		 * (SRST must not gate JTAG when reset_halt is wanted) */
		if (target->reset_halt)
			if (jtag_get_reset_config() & RESET_SRST_NO_GATING)
				jtag_add_reset(0, 1);
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	register_cache_invalidate(armv7a->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1945
1946 static int cortex_a_deassert_reset(struct target *target)
1947 {
1948 int retval;
1949
1950 LOG_DEBUG(" ");
1951
1952 /* be certain SRST is off */
1953 jtag_add_reset(0, 0);
1954
1955 retval = cortex_a_poll(target);
1956 if (retval != ERROR_OK)
1957 return retval;
1958
1959 if (target->reset_halt) {
1960 if (target->state != TARGET_HALTED) {
1961 LOG_WARNING("%s: ran after reset and before halt ...",
1962 target_name(target));
1963 retval = target_halt(target);
1964 if (retval != ERROR_OK)
1965 return retval;
1966 }
1967 }
1968
1969 return ERROR_OK;
1970 }
1971
1972 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1973 {
1974 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1975 * New desired mode must be in mode. Current value of DSCR must be in
1976 * *dscr, which is updated with new value.
1977 *
1978 * This function elides actually sending the mode-change over the debug
1979 * interface if the mode is already set as desired.
1980 */
1981 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1982 if (new_dscr != *dscr) {
1983 struct armv7a_common *armv7a = target_to_armv7a(target);
1984 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1985 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1986 if (retval == ERROR_OK)
1987 *dscr = new_dscr;
1988 return retval;
1989 } else {
1990 return ERROR_OK;
1991 }
1992 }
1993
1994 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1995 uint32_t value, uint32_t *dscr)
1996 {
1997 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1998 struct armv7a_common *armv7a = target_to_armv7a(target);
1999 int64_t then = timeval_ms();
2000 int retval;
2001
2002 while ((*dscr & mask) != value) {
2003 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2004 armv7a->debug_base + CPUDBG_DSCR, dscr);
2005 if (retval != ERROR_OK)
2006 return retval;
2007 if (timeval_ms() > then + 1000) {
2008 LOG_ERROR("timeout waiting for DSCR bit change");
2009 return ERROR_FAIL;
2010 }
2011 }
2012 return ERROR_OK;
2013 }
2014
/* Read a coprocessor register through the DCC.
 *
 * Executes the given MRC opcode (which must target R0), moves R0 into
 * DTRTX, waits for TXfull_l, and reads the value from CPUDBG_DTRTX into
 * *data.  *dscr is the cached DSCR value and is kept up to date.
 * Returns ERROR_OK or an OpenOCD error code.
 */
static int cortex_a_read_copro(struct target *target, uint32_t opcode,
	uint32_t *data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Move from coprocessor to R0. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
	 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
	 * must also check TXfull_l). Most of the time this will be free
	 * because TXfull_l will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2048
2049 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
2050 uint32_t *dfsr, uint32_t *dscr)
2051 {
2052 int retval;
2053
2054 if (dfar) {
2055 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
2056 if (retval != ERROR_OK)
2057 return retval;
2058 }
2059
2060 if (dfsr) {
2061 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2062 if (retval != ERROR_OK)
2063 return retval;
2064 }
2065
2066 return ERROR_OK;
2067 }
2068
2069 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
2070 uint32_t data, uint32_t *dscr)
2071 {
2072 int retval;
2073 struct armv7a_common *armv7a = target_to_armv7a(target);
2074
2075 /* Write the value into DTRRX. */
2076 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2077 armv7a->debug_base + CPUDBG_DTRRX, data);
2078 if (retval != ERROR_OK)
2079 return retval;
2080
2081 /* Move from DTRRX to R0. */
2082 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
2083 if (retval != ERROR_OK)
2084 return retval;
2085
2086 /* Move from R0 to coprocessor. */
2087 retval = cortex_a_exec_opcode(target, opcode, dscr);
2088 if (retval != ERROR_OK)
2089 return retval;
2090
2091 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2092 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2093 * check RXfull_l). Most of the time this will be free because RXfull_l
2094 * will be cleared immediately and cached in dscr. */
2095 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2096 if (retval != ERROR_OK)
2097 return retval;
2098
2099 return ERROR_OK;
2100 }
2101
2102 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2103 uint32_t dfsr, uint32_t *dscr)
2104 {
2105 int retval;
2106
2107 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2108 if (retval != ERROR_OK)
2109 return retval;
2110
2111 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2112 if (retval != ERROR_OK)
2113 return retval;
2114
2115 return ERROR_OK;
2116 }
2117
2118 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2119 {
2120 uint32_t status, upper4;
2121
2122 if (dfsr & (1 << 9)) {
2123 /* LPAE format. */
2124 status = dfsr & 0x3f;
2125 upper4 = status >> 2;
2126 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2127 return ERROR_TARGET_TRANSLATION_FAULT;
2128 else if (status == 33)
2129 return ERROR_TARGET_UNALIGNED_ACCESS;
2130 else
2131 return ERROR_TARGET_DATA_ABORT;
2132 } else {
2133 /* Normal format. */
2134 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2135 if (status == 1)
2136 return ERROR_TARGET_UNALIGNED_ACCESS;
2137 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2138 status == 9 || status == 11 || status == 13 || status == 15)
2139 return ERROR_TARGET_TRANSLATION_FAULT;
2140 else
2141 return ERROR_TARGET_DATA_ABORT;
2142 }
2143 }
2144
static int cortex_a_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size size from *buffer. Old value of DSCR must
	 * be in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_write_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Write the value to store into DTRRX, using the target's byte
		 * order for multi-byte objects. */
		uint32_t data, opcode;
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* Transfer the value from DTRRX to R1. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Write the value transferred to R1 into memory. The _IP opcode
		 * variants presumably advance the address in R0 after each store,
		 * so R0 need not be reloaded per iteration — confirm against
		 * arm_opcodes.h. */
		if (size == 1)
			opcode = ARMV4_5_STRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_STRH_IP(1, 0);
		else
			opcode = ARMV4_5_STRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check RXfull_l). Most of the time this will be free
		 * because RXfull_l will be cleared immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2219
static int cortex_a_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size 4 from *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is fast but only works for
	 * word-sized objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	/* Switch to fast mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Latch STC instruction. In fast mode the latched instruction is
	 * reissued for each DTRRX write below, so this single STC streams
	 * every word of the buffer into memory. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
	if (retval != ERROR_OK)
		return retval;

	/* Transfer all the data and issue all the instructions. Note: no
	 * fault or completion check happens here; the caller is expected to
	 * inspect DSCR afterwards. */
	return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
			4, count, armv7a->debug_base + CPUDBG_DTRRX);
}
2248
2249 static int cortex_a_write_cpu_memory(struct target *target,
2250 uint32_t address, uint32_t size,
2251 uint32_t count, const uint8_t *buffer)
2252 {
2253 /* Write memory through the CPU. */
2254 int retval, final_retval;
2255 struct armv7a_common *armv7a = target_to_armv7a(target);
2256 struct arm *arm = &armv7a->arm;
2257 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2258
2259 LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2260 address, size, count);
2261 if (target->state != TARGET_HALTED) {
2262 LOG_WARNING("target not halted");
2263 return ERROR_TARGET_NOT_HALTED;
2264 }
2265
2266 if (!count)
2267 return ERROR_OK;
2268
2269 /* Clear any abort. */
2270 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2271 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2272 if (retval != ERROR_OK)
2273 return retval;
2274
2275 /* Read DSCR. */
2276 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2277 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2278 if (retval != ERROR_OK)
2279 return retval;
2280
2281 /* Switch to non-blocking mode if not already in that mode. */
2282 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2283 if (retval != ERROR_OK)
2284 goto out;
2285
2286 /* Mark R0 as dirty. */
2287 arm_reg_current(arm, 0)->dirty = true;
2288
2289 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2290 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2291 if (retval != ERROR_OK)
2292 goto out;
2293
2294 /* Get the memory address into R0. */
2295 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2296 armv7a->debug_base + CPUDBG_DTRRX, address);
2297 if (retval != ERROR_OK)
2298 goto out;
2299 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2300 if (retval != ERROR_OK)
2301 goto out;
2302
2303 if (size == 4 && (address % 4) == 0) {
2304 /* We are doing a word-aligned transfer, so use fast mode. */
2305 retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2306 } else {
2307 /* Use slow path. */
2308 retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2309 }
2310
2311 out:
2312 final_retval = retval;
2313
2314 /* Switch to non-blocking mode if not already in that mode. */
2315 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2316 if (final_retval == ERROR_OK)
2317 final_retval = retval;
2318
2319 /* Wait for last issued instruction to complete. */
2320 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2321 if (final_retval == ERROR_OK)
2322 final_retval = retval;
2323
2324 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2325 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2326 * check RXfull_l). Most of the time this will be free because RXfull_l
2327 * will be cleared immediately and cached in dscr. However, don't do this
2328 * if there is fault, because then the instruction might not have completed
2329 * successfully. */
2330 if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2331 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2332 if (retval != ERROR_OK)
2333 return retval;
2334 }
2335
2336 /* If there were any sticky abort flags, clear them. */
2337 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2338 fault_dscr = dscr;
2339 mem_ap_write_atomic_u32(armv7a->debug_ap,
2340 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2341 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2342 } else {
2343 fault_dscr = 0;
2344 }
2345
2346 /* Handle synchronous data faults. */
2347 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2348 if (final_retval == ERROR_OK) {
2349 /* Final return value will reflect cause of fault. */
2350 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2351 if (retval == ERROR_OK) {
2352 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2353 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2354 } else
2355 final_retval = retval;
2356 }
2357 /* Fault destroyed DFAR/DFSR; restore them. */
2358 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2359 if (retval != ERROR_OK)
2360 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2361 }
2362
2363 /* Handle asynchronous data faults. */
2364 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2365 if (final_retval == ERROR_OK)
2366 /* No other error has been recorded so far, so keep this one. */
2367 final_retval = ERROR_TARGET_DATA_ABORT;
2368 }
2369
2370 /* If the DCC is nonempty, clear it. */
2371 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2372 uint32_t dummy;
2373 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2374 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2375 if (final_retval == ERROR_OK)
2376 final_retval = retval;
2377 }
2378 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2379 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2380 if (final_retval == ERROR_OK)
2381 final_retval = retval;
2382 }
2383
2384 /* Done. */
2385 return final_retval;
2386 }
2387
static int cortex_a_read_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size size into *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_read_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Issue a load of the appropriate size to R1. The _IP opcode
		 * variants presumably advance the address in R0 after each load,
		 * so R0 need not be reloaded per iteration — confirm against
		 * arm_opcodes.h. */
		uint32_t opcode, data;
		if (size == 1)
			opcode = ARMV4_5_LDRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_LDRH_IP(1, 0);
		else
			opcode = ARMV4_5_LDRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Issue a write of R1 to DTRTX. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check TXfull_l). Most of the time this will be free
		 * because TXfull_l will be set immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
				DSCR_DTRTX_FULL_LATCHED, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer, storing it
		 * in the target's byte order for multi-byte objects. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &data);
		if (retval != ERROR_OK)
			return retval;
		if (size == 1)
			*buffer = (uint8_t) data;
		else if (size == 2)
			target_buffer_set_u16(target, buffer, (uint16_t) data);
		else
			target_buffer_set_u32(target, buffer, data);

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2463
static int cortex_a_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
	 * *dscr; updated to new value. This is fast but only works for word-sized
	 * objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 *
	 * The first LDC is issued in non-blocking mode; the remaining words are
	 * streamed in fast mode, where each DTRTX read reissues the latched LDC.
	 * The final word is therefore always collected separately after leaving
	 * fast mode, to avoid triggering a read past the requested range.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t u32;
	int retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Issue the LDC instruction via a write to ITR. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* One word is already in flight from the LDC above. */
	count--;

	if (count > 0) {
		/* Switch to fast mode if not already in that mode. */
		retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Latch LDC instruction. */
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. Due to fast
		 * mode rules, this blocks until the instruction finishes executing and
		 * then reissues the read instruction to read the next word from
		 * memory. The last read of DTRTX in this call reads the second-to-last
		 * word from memory and issues the read instruction for the last word.
		 */
		retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
				4, count, armv7a->debug_base + CPUDBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += count * 4;
	}

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, dscr, false);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Check for faults and return early. */
	if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
		return ERROR_OK; /* A data fault is not considered a system failure. */

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check TXfull_l). Most of the time this will be free because TXfull_l
	 * will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX into the buffer. This is the last
	 * word. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &u32);
	if (retval != ERROR_OK)
		return retval;
	target_buffer_set_u32(target, buffer, u32);

	return ERROR_OK;
}
2550
static int cortex_a_read_cpu_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through the CPU core, using the DCC as the data channel.
	 *
	 * Clears pending aborts, saves DFAR/DFSR (a fault during the transfer
	 * clobbers them), loads the address into R0, then dispatches to the
	 * fast helper (word-sized, aligned) or the slow helper (everything
	 * else). The cleanup after the "out" label always runs: re-enter
	 * non-blocking mode, wait for the last instruction, decode and clear
	 * sticky aborts, restore DFAR/DFSR after a precise fault, and drain a
	 * non-empty DCC.
	 *
	 * Returns ERROR_OK, a debug-port error, or a fault code derived from
	 * DFSR if the target took a data abort during the transfer. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
	}

out:
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2677
2678
/*
 * Cortex-A Memory access
 *
 * This is the same as for Cortex-M3, but we must also use the
 * correct AP number for every access.
 */
2685
2686 static int cortex_a_read_phys_memory(struct target *target,
2687 uint32_t address, uint32_t size,
2688 uint32_t count, uint8_t *buffer)
2689 {
2690 struct armv7a_common *armv7a = target_to_armv7a(target);
2691 struct adiv5_dap *swjdp = armv7a->arm.dap;
2692 uint8_t apsel = swjdp->apsel;
2693 int retval;
2694
2695 if (!count || !buffer)
2696 return ERROR_COMMAND_SYNTAX_ERROR;
2697
2698 LOG_DEBUG("Reading memory at real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32,
2699 address, size, count);
2700
2701 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
2702 return mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2703
2704 /* read memory through the CPU */
2705 cortex_a_prep_memaccess(target, 1);
2706 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2707 cortex_a_post_memaccess(target, 1);
2708
2709 return retval;
2710 }
2711
2712 static int cortex_a_read_memory(struct target *target, uint32_t address,
2713 uint32_t size, uint32_t count, uint8_t *buffer)
2714 {
2715 int retval;
2716
2717 /* cortex_a handles unaligned memory access */
2718 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2719 size, count);
2720
2721 cortex_a_prep_memaccess(target, 0);
2722 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2723 cortex_a_post_memaccess(target, 0);
2724
2725 return retval;
2726 }
2727
2728 static int cortex_a_read_memory_ahb(struct target *target, uint32_t address,
2729 uint32_t size, uint32_t count, uint8_t *buffer)
2730 {
2731 int mmu_enabled = 0;
2732 uint32_t virt, phys;
2733 int retval;
2734 struct armv7a_common *armv7a = target_to_armv7a(target);
2735 struct adiv5_dap *swjdp = armv7a->arm.dap;
2736 uint8_t apsel = swjdp->apsel;
2737
2738 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2739 return target_read_memory(target, address, size, count, buffer);
2740
2741 /* cortex_a handles unaligned memory access */
2742 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2743 size, count);
2744
2745 /* determine if MMU was enabled on target stop */
2746 if (!armv7a->is_armv7r) {
2747 retval = cortex_a_mmu(target, &mmu_enabled);
2748 if (retval != ERROR_OK)
2749 return retval;
2750 }
2751
2752 if (mmu_enabled) {
2753 virt = address;
2754 retval = cortex_a_virt2phys(target, virt, &phys);
2755 if (retval != ERROR_OK)
2756 return retval;
2757
2758 LOG_DEBUG("Reading at virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2759 virt, phys);
2760 address = phys;
2761 }
2762
2763 if (!count || !buffer)
2764 return ERROR_COMMAND_SYNTAX_ERROR;
2765
2766 retval = mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2767
2768 return retval;
2769 }
2770
2771 static int cortex_a_write_phys_memory(struct target *target,
2772 uint32_t address, uint32_t size,
2773 uint32_t count, const uint8_t *buffer)
2774 {
2775 struct armv7a_common *armv7a = target_to_armv7a(target);
2776 struct adiv5_dap *swjdp = armv7a->arm.dap;
2777 uint8_t apsel = swjdp->apsel;
2778 int retval;
2779
2780 if (!count || !buffer)
2781 return ERROR_COMMAND_SYNTAX_ERROR;
2782
2783 LOG_DEBUG("Writing memory to real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2784 size, count);
2785
2786 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
2787 return mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2788
2789 /* write memory through the CPU */
2790 cortex_a_prep_memaccess(target, 1);
2791 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2792 cortex_a_post_memaccess(target, 1);
2793
2794 return retval;
2795 }
2796
2797 static int cortex_a_write_memory(struct target *target, uint32_t address,
2798 uint32_t size, uint32_t count, const uint8_t *buffer)
2799 {
2800 int retval;
2801
2802 /* cortex_a handles unaligned memory access */
2803 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2804 size, count);
2805
2806 /* memory writes bypass the caches, must flush before writing */
2807 armv7a_cache_auto_flush_on_write(target, address, size * count);
2808
2809 cortex_a_prep_memaccess(target, 0);
2810 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2811 cortex_a_post_memaccess(target, 0);
2812 return retval;
2813 }
2814
2815 static int cortex_a_write_memory_ahb(struct target *target, uint32_t address,
2816 uint32_t size, uint32_t count, const uint8_t *buffer)
2817 {
2818 int mmu_enabled = 0;
2819 uint32_t virt, phys;
2820 int retval;
2821 struct armv7a_common *armv7a = target_to_armv7a(target);
2822 struct adiv5_dap *swjdp = armv7a->arm.dap;
2823 uint8_t apsel = swjdp->apsel;
2824
2825 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2826 return target_write_memory(target, address, size, count, buffer);
2827
2828 /* cortex_a handles unaligned memory access */
2829 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2830 size, count);
2831
2832 /* determine if MMU was enabled on target stop */
2833 if (!armv7a->is_armv7r) {
2834 retval = cortex_a_mmu(target, &mmu_enabled);
2835 if (retval != ERROR_OK)
2836 return retval;
2837 }
2838
2839 if (mmu_enabled) {
2840 virt = address;
2841 retval = cortex_a_virt2phys(target, virt, &phys);
2842 if (retval != ERROR_OK)
2843 return retval;
2844
2845 LOG_DEBUG("Writing to virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2846 virt,
2847 phys);
2848 address = phys;
2849 }
2850
2851 if (!count || !buffer)
2852 return ERROR_COMMAND_SYNTAX_ERROR;
2853
2854 retval = mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2855
2856 return retval;
2857 }
2858
2859 static int cortex_a_read_buffer(struct target *target, uint32_t address,
2860 uint32_t count, uint8_t *buffer)
2861 {
2862 uint32_t size;
2863
2864 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2865 * will have something to do with the size we leave to it. */
2866 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2867 if (address & size) {
2868 int retval = cortex_a_read_memory_ahb(target, address, size, 1, buffer);
2869 if (retval != ERROR_OK)
2870 return retval;
2871 address += size;
2872 count -= size;
2873 buffer += size;
2874 }
2875 }
2876
2877 /* Read the data with as large access size as possible. */
2878 for (; size > 0; size /= 2) {
2879 uint32_t aligned = count - count % size;
2880 if (aligned > 0) {
2881 int retval = cortex_a_read_memory_ahb(target, address, size, aligned / size, buffer);
2882 if (retval != ERROR_OK)
2883 return retval;
2884 address += aligned;
2885 count -= aligned;
2886 buffer += aligned;
2887 }
2888 }
2889
2890 return ERROR_OK;
2891 }
2892
2893 static int cortex_a_write_buffer(struct target *target, uint32_t address,
2894 uint32_t count, const uint8_t *buffer)
2895 {
2896 uint32_t size;
2897
2898 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2899 * will have something to do with the size we leave to it. */
2900 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2901 if (address & size) {
2902 int retval = cortex_a_write_memory_ahb(target, address, size, 1, buffer);
2903 if (retval != ERROR_OK)
2904 return retval;
2905 address += size;
2906 count -= size;
2907 buffer += size;
2908 }
2909 }
2910
2911 /* Write the data with as large access size as possible. */
2912 for (; size > 0; size /= 2) {
2913 uint32_t aligned = count - count % size;
2914 if (aligned > 0) {
2915 int retval = cortex_a_write_memory_ahb(target, address, size, aligned / size, buffer);
2916 if (retval != ERROR_OK)
2917 return retval;
2918 address += aligned;
2919 count -= aligned;
2920 buffer += aligned;
2921 }
2922 }
2923
2924 return ERROR_OK;
2925 }
2926
2927 static int cortex_a_handle_target_request(void *priv)
2928 {
2929 struct target *target = priv;
2930 struct armv7a_common *armv7a = target_to_armv7a(target);
2931 int retval;
2932
2933 if (!target_was_examined(target))
2934 return ERROR_OK;
2935 if (!target->dbg_msg_enabled)
2936 return ERROR_OK;
2937
2938 if (target->state == TARGET_RUNNING) {
2939 uint32_t request;
2940 uint32_t dscr;
2941 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2942 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2943
2944 /* check if we have data */
2945 int64_t then = timeval_ms();
2946 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2947 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2948 armv7a->debug_base + CPUDBG_DTRTX, &request);
2949 if (retval == ERROR_OK) {
2950 target_request(target, request);
2951 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2952 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2953 }
2954 if (timeval_ms() > then + 1000) {
2955 LOG_ERROR("Timeout waiting for dtr tx full");
2956 return ERROR_FAIL;
2957 }
2958 }
2959 }
2960
2961 return ERROR_OK;
2962 }
2963
2964 /*
2965 * Cortex-A target information and configuration
2966 */
2967
2968 static int cortex_a_examine_first(struct target *target)
2969 {
2970 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2971 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2972 struct adiv5_dap *swjdp = armv7a->arm.dap;
2973
2974 int i;
2975 int retval = ERROR_OK;
2976 uint32_t didr, ctypr, ttypr, cpuid, dbg_osreg;
2977
2978 retval = dap_dp_init(swjdp);
2979 if (retval != ERROR_OK) {
2980 LOG_ERROR("Could not initialize the debug port");
2981 return retval;
2982 }
2983
2984 /* Search for the APB-AP - it is needed for access to debug registers */
2985 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2986 if (retval != ERROR_OK) {
2987 LOG_ERROR("Could not find APB-AP for debug access");
2988 return retval;
2989 }
2990
2991 retval = mem_ap_init(armv7a->debug_ap);
2992 if (retval != ERROR_OK) {
2993 LOG_ERROR("Could not initialize the APB-AP");
2994 return retval;
2995 }
2996
2997 armv7a->debug_ap->memaccess_tck = 80;
2998
2999 /* Search for the AHB-AB.
3000 * REVISIT: We should search for AXI-AP as well and make sure the AP's MEMTYPE says it
3001 * can access system memory. */
3002 armv7a->memory_ap_available = false;
3003 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
3004 if (retval == ERROR_OK) {
3005 retval = mem_ap_init(armv7a->memory_ap);
3006 if (retval == ERROR_OK)
3007 armv7a->memory_ap_available = true;
3008 }
3009 if (retval != ERROR_OK) {
3010 /* AHB-AP not found or unavailable - use the CPU */
3011 LOG_DEBUG("No AHB-AP available for memory access");
3012 }
3013
3014 if (!target->dbgbase_set) {
3015 uint32_t dbgbase;
3016 /* Get ROM Table base */
3017 uint32_t apid;
3018 int32_t coreidx = target->coreid;
3019 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
3020 target->cmd_name);
3021 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
3022 if (retval != ERROR_OK)
3023 return retval;
3024 /* Lookup 0x15 -- Processor DAP */
3025 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
3026 &armv7a->debug_base, &coreidx);
3027 if (retval != ERROR_OK) {
3028 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
3029 target->cmd_name);
3030 return retval;
3031 }
3032 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
3033 target->coreid, armv7a->debug_base);
3034 } else
3035 armv7a->debug_base = target->dbgbase;
3036
3037 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3038 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
3039 if (retval != ERROR_OK)
3040 return retval;
3041
3042 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3043 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
3044 if (retval != ERROR_OK) {
3045 LOG_DEBUG("Examine %s failed", "CPUID");
3046 return retval;
3047 }
3048
3049 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3050 armv7a->debug_base + CPUDBG_CTYPR, &ctypr);
3051 if (retval != ERROR_OK) {
3052 LOG_DEBUG("Examine %s failed", "CTYPR");
3053 return retval;
3054 }
3055
3056 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3057 armv7a->debug_base + CPUDBG_TTYPR, &ttypr);
3058 if (retval != ERROR_OK) {
3059 LOG_DEBUG("Examine %s failed", "TTYPR");
3060 return retval;
3061 }
3062
3063 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3064 armv7a->debug_base + CPUDBG_DIDR, &didr);
3065 if (retval != ERROR_OK) {
3066 LOG_DEBUG("Examine %s failed", "DIDR");
3067 return retval;
3068 }
3069
3070 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
3071 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
3072 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
3073 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
3074
3075 cortex_a->cpuid = cpuid;
3076 cortex_a->ctypr = ctypr;
3077 cortex_a->ttypr = ttypr;
3078 cortex_a->didr = didr;
3079
3080 /* Unlocking the debug registers */
3081 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
3082 CORTEX_A15_PARTNUM) {
3083
3084 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
3085 armv7a->debug_base + CPUDBG_OSLAR,
3086 0);
3087
3088 if (retval != ERROR_OK)
3089 return retval;
3090
3091 }
3092 /* Unlocking the debug registers */
3093 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
3094 CORTEX_A7_PARTNUM) {
3095
3096 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
3097 armv7a->debug_base + CPUDBG_OSLAR,
3098 0);
3099
3100 if (retval != ERROR_OK)
3101 return retval;
3102
3103 }
3104 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3105 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
3106
3107 if (retval != ERROR_OK)
3108 return retval;
3109
3110 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
3111
3112 armv7a->arm.core_type = ARM_MODE_MON;
3113
3114 /* Avoid recreating the registers cache */
3115 if (!target_was_examined(target)) {
3116 retval = cortex_a_dpm_setup(cortex_a, didr);
3117 if (retval != ERROR_OK)
3118 return retval;
3119 }
3120
3121 /* Setup Breakpoint Register Pairs */
3122 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3123 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3124 cortex_a->brp_num_available = cortex_a->brp_num;
3125 free(cortex_a->brp_list);
3126 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3127 /* cortex_a->brb_enabled = ????; */
3128 for (i = 0; i < cortex_a->brp_num; i++) {
3129 cortex_a->brp_list[i].used = 0;
3130 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3131 cortex_a->brp_list[i].type = BRP_NORMAL;
3132 else
3133 cortex_a->brp_list[i].type = BRP_CONTEXT;
3134 cortex_a->brp_list[i].value = 0;
3135 cortex_a->brp_list[i].control = 0;
3136 cortex_a->brp_list[i].BRPn = i;
3137 }
3138
3139 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3140
3141 /* select debug_ap as default */
3142 swjdp->apsel = armv7a->debug_ap->ap_num;
3143
3144 target_set_examined(target);
3145 return ERROR_OK;
3146 }
3147
3148 static int cortex_a_examine(struct target *target)
3149 {
3150 int retval = ERROR_OK;
3151
3152 /* Reestablish communication after target reset */
3153 retval = cortex_a_examine_first(target);
3154
3155 /* Configure core debug access */
3156 if (retval == ERROR_OK)
3157 retval = cortex_a_init_debug_access(target);
3158
3159 return retval;
3160 }
3161
3162 /*
3163 * Cortex-A target creation and initialization
3164 */
3165
/* init_target hook.  Intentionally a stub: the real per-target setup is
 * deferred to examine_first(), which runs once the DAP is reachable. */
static int cortex_a_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
3172
3173 static int cortex_a_init_arch_info(struct target *target,
3174 struct cortex_a_common *cortex_a, struct jtag_tap *tap)
3175 {
3176 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3177
3178 /* Setup struct cortex_a_common */
3179 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3180
3181 /* tap has no dap initialized */
3182 if (!tap->dap) {
3183 tap->dap = dap_init();
3184
3185 /* Leave (only) generic DAP stuff for debugport_init() */
3186 tap->dap->tap = tap;
3187 }
3188
3189 armv7a->arm.dap = tap->dap;
3190
3191 cortex_a->fast_reg_read = 0;
3192
3193 /* register arch-specific functions */
3194 armv7a->examine_debug_reason = NULL;
3195
3196 armv7a->post_debug_entry = cortex_a_post_debug_entry;
3197
3198 armv7a->pre_restore_context = NULL;
3199
3200 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3201
3202
3203 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
3204
3205 /* REVISIT v7a setup should be in a v7a-specific routine */
3206 armv7a_init_arch_info(target, armv7a);
3207 target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
3208
3209 return ERROR_OK;
3210 }
3211
3212 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3213 {
3214 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3215
3216 cortex_a->armv7a_common.is_armv7r = false;
3217
3218 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3219 }
3220
3221 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3222 {
3223 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3224
3225 cortex_a->armv7a_common.is_armv7r = true;
3226
3227 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3228 }
3229
3230 static void cortex_a_deinit_target(struct target *target)
3231 {
3232 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3233 struct arm_dpm *dpm = &cortex_a->armv7a_common.dpm;
3234
3235 free(cortex_a->brp_list);
3236 free(dpm->dbp);
3237 free(dpm->dwp);
3238 free(cortex_a);
3239 }
3240
3241 static int cortex_a_mmu(struct target *target, int *enabled)
3242 {
3243 struct armv7a_common *armv7a = target_to_armv7a(target);
3244
3245 if (target->state != TARGET_HALTED) {
3246 LOG_ERROR("%s: target not halted", __func__);
3247 return ERROR_TARGET_INVALID;
3248 }
3249
3250 if (armv7a->is_armv7r)
3251 *enabled = 0;
3252 else
3253 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3254
3255 return ERROR_OK;
3256 }
3257
3258 static int cortex_a_virt2phys(struct target *target,
3259 uint32_t virt, uint32_t *phys)
3260 {
3261 int retval = ERROR_FAIL;
3262 struct armv7a_common *armv7a = target_to_armv7a(target);
3263 struct adiv5_dap *swjdp = armv7a->arm.dap;
3264 uint8_t apsel = swjdp->apsel;
3265 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num)) {
3266 uint32_t ret;
3267 retval = armv7a_mmu_translate_va(target,
3268 virt, &ret);
3269 if (retval != ERROR_OK)
3270 goto done;
3271 *phys = ret;
3272 } else {/* use this method if armv7a->memory_ap not selected
3273 * mmu must be enable in order to get a correct translation */
3274 retval = cortex_a_mmu_modify(target, 1);
3275 if (retval != ERROR_OK)
3276 goto done;
3277 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
3278 }
3279 done:
3280 return retval;
3281 }
3282
3283 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3284 {
3285 struct target *target = get_current_target(CMD_CTX);
3286 struct armv7a_common *armv7a = target_to_armv7a(target);
3287
3288 return armv7a_handle_cache_info_command(CMD_CTX,
3289 &armv7a->armv7a_mmu.armv7a_cache);
3290 }
3291
3292
3293 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3294 {
3295 struct target *target = get_current_target(CMD_CTX);
3296 if (!target_was_examined(target)) {
3297 LOG_ERROR("target not examined yet");
3298 return ERROR_FAIL;
3299 }
3300
3301 return cortex_a_init_debug_access(target);
3302 }
3303 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
3304 {
3305 struct target *target = get_current_target(CMD_CTX);
3306 /* check target is an smp target */
3307 struct target_list *head;
3308 struct target *curr;
3309 head = target->head;
3310 target->smp = 0;
3311 if (head != (struct target_list *)NULL) {
3312 while (head != (struct target_list *)NULL) {
3313 curr = head->target;
3314 curr->smp = 0;
3315 head = head->next;
3316 }
3317 /* fixes the target display to the debugger */
3318 target->gdb_service->target = target;
3319 }
3320 return ERROR_OK;
3321 }
3322
3323 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
3324 {
3325 struct target *target = get_current_target(CMD_CTX);
3326 struct target_list *head;
3327 struct target *curr;
3328 head = target->head;
3329 if (head != (struct target_list *)NULL) {
3330 target->smp = 1;
3331 while (head != (struct target_list *)NULL) {
3332 curr = head->target;
3333 curr->smp = 1;
3334 head = head->next;
3335 }
3336 }
3337 return ERROR_OK;
3338 }
3339
3340 COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
3341 {
3342 struct target *target = get_current_target(CMD_CTX);
3343 int retval = ERROR_OK;
3344 struct target_list *head;
3345 head = target->head;
3346 if (head != (struct target_list *)NULL) {
3347 if (CMD_ARGC == 1) {
3348 int coreid = 0;
3349 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
3350 if (ERROR_OK != retval)
3351 return retval;
3352 target->gdb_service->core[1] = coreid;
3353
3354 }
3355 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
3356 , target->gdb_service->core[1]);
3357 }
3358 return ERROR_OK;
3359 }
3360
3361 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3362 {
3363 struct target *target = get_current_target(CMD_CTX);
3364 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3365
3366 static const Jim_Nvp nvp_maskisr_modes[] = {
3367 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3368 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3369 { .name = NULL, .value = -1 },
3370 };
3371 const Jim_Nvp *n;
3372
3373 if (CMD_ARGC > 0) {
3374 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3375 if (n->name == NULL) {
3376 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3377 return ERROR_COMMAND_SYNTAX_ERROR;
3378 }
3379
3380 cortex_a->isrmasking_mode = n->value;
3381 }
3382
3383 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3384 command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
3385
3386 return ERROR_OK;
3387 }
3388
3389 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3390 {
3391 struct target *target = get_current_target(CMD_CTX);
3392 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3393
3394 static const Jim_Nvp nvp_dacrfixup_modes[] = {
3395 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3396 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3397 { .name = NULL, .value = -1 },
3398 };
3399 const Jim_Nvp *n;
3400
3401 if (CMD_ARGC > 0) {
3402 n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3403 if (n->name == NULL)
3404 return ERROR_COMMAND_SYNTAX_ERROR;
3405 cortex_a->dacrfixup_mode = n->value;
3406
3407 }
3408
3409 n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3410 command_print(CMD_CTX, "cortex_a domain access control fixup %s", n->name);
3411
3412 return ERROR_OK;
3413 }
3414
3415 static const struct command_registration cortex_a_exec_command_handlers[] = {
3416 {
3417 .name = "cache_info",
3418 .handler = cortex_a_handle_cache_info_command,
3419 .mode = COMMAND_EXEC,
3420 .help = "display information about target caches",
3421 .usage = "",
3422 },
3423 {
3424 .name = "dbginit",
3425 .handler = cortex_a_handle_dbginit_command,
3426 .mode = COMMAND_EXEC,
3427 .help = "Initialize core debug",
3428 .usage = "",
3429 },
3430 { .name = "smp_off",
3431 .handler = cortex_a_handle_smp_off_command,
3432 .mode = COMMAND_EXEC,
3433 .help = "Stop smp handling",
3434 .usage = "",},
3435 {
3436 .name = "smp_on",
3437 .handler = cortex_a_handle_smp_on_command,
3438 .mode = COMMAND_EXEC,
3439 .help = "Restart smp handling",
3440 .usage = "",
3441 },
3442 {
3443 .name = "smp_gdb",
3444 .handler = cortex_a_handle_smp_gdb_command,
3445 .mode = COMMAND_EXEC,
3446 .help = "display/fix current core played to gdb",
3447 .usage = "",
3448 },
3449 {
3450 .name = "maskisr",
3451 .handler = handle_cortex_a_mask_interrupts_command,
3452 .mode = COMMAND_ANY,
3453 .help = "mask cortex_a interrupts",
3454 .usage = "['on'|'off']",
3455 },
3456 {
3457 .name = "dacrfixup",
3458 .handler = handle_cortex_a_dacrfixup_command,
3459 .mode = COMMAND_EXEC,
3460 .help = "set domain access control (DACR) to all-manager "
3461 "on memory access",
3462 .usage = "['on'|'off']",
3463 },
3464
3465 COMMAND_REGISTRATION_DONE
3466 };
/* Top-level command registration for Cortex-A targets: chain in the
 * generic ARM and ARMv7-A groups, then add the "cortex_a" group. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3483
/* ARMv7-A (Cortex-A) target driver.  Registered as "cortex_a";
 * "cortex_a8" is kept as a deprecated alias. */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	/* bulk transfers; these prefer wide AHB accesses when available */
	.read_buffer = cortex_a_read_buffer,
	.write_buffer = cortex_a_write_buffer,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* NOTE(review): watchpoints are not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,

	/* physical access and address translation via the MMU helpers */
	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
3530
/* Subcommands of the "cortex_r4" command group (subset of the Cortex-A
 * commands; no SMP or DACR-fixup handling for R4). */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		/* NOTE(review): COMMAND_EXEC here vs COMMAND_ANY for the
		 * cortex_a maskisr entry — confirm whether intentional */
		.mode = COMMAND_EXEC,
		.help = "mask cortex_r4 interrupts",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for Cortex-R4 targets: chain in the
 * generic ARM and ARMv7-A groups, then add the "cortex_r4" group. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3572
/* ARMv7-R (Cortex-R4) target driver.  Shares almost all callbacks with
 * the Cortex-A driver but omits the buffer-transfer, physical-memory and
 * virt2phys hooks (R cores have an MPU, not an MMU). */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* NOTE(review): watchpoints are not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)