cortex_a: allow physical memory access through AHB-AP again
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program; if not, write to the *
38 * Free Software Foundation, Inc., *
39 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
40 * *
41 * Cortex-A8(tm) TRM, ARM DDI 0344H *
42 * Cortex-A9(tm) TRM, ARM DDI 0407F *
43 * Cortex-A4(tm) TRM, ARM DDI 0363E *
44 * Cortex-A15(tm)TRM, ARM DDI 0438C *
45 * *
46 ***************************************************************************/
47
48 #ifdef HAVE_CONFIG_H
49 #include "config.h"
50 #endif
51
52 #include "breakpoints.h"
53 #include "cortex_a.h"
54 #include "register.h"
55 #include "target_request.h"
56 #include "target_type.h"
57 #include "arm_opcodes.h"
58 #include <helper/time_support.h>
59
60 static int cortex_a_poll(struct target *target);
61 static int cortex_a_debug_entry(struct target *target);
62 static int cortex_a_restore_context(struct target *target, bool bpwp);
63 static int cortex_a_set_breakpoint(struct target *target,
64 struct breakpoint *breakpoint, uint8_t matchmode);
65 static int cortex_a_set_context_breakpoint(struct target *target,
66 struct breakpoint *breakpoint, uint8_t matchmode);
67 static int cortex_a_set_hybrid_breakpoint(struct target *target,
68 struct breakpoint *breakpoint);
69 static int cortex_a_unset_breakpoint(struct target *target,
70 struct breakpoint *breakpoint);
71 static int cortex_a_dap_read_coreregister_u32(struct target *target,
72 uint32_t *value, int regnum);
73 static int cortex_a_dap_write_coreregister_u32(struct target *target,
74 uint32_t value, int regnum);
75 static int cortex_a_mmu(struct target *target, int *enabled);
76 static int cortex_a_mmu_modify(struct target *target, int enable);
77 static int cortex_a_virt2phys(struct target *target,
78 uint32_t virt, uint32_t *phys);
79 static int cortex_a_read_apb_ab_memory(struct target *target,
80 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
81
82
/* restore cp15_control_reg at resume */
static int cortex_a_restore_cp15_control_reg(struct target *target)
{
	int retval = ERROR_OK;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Only write back if the cached "current" value diverged from the
	 * value saved at debug entry (e.g. after a temporary MMU toggle
	 * done for a physical memory access). */
	if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
		cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
		/* MCR p15, 0, <Rt>, c1, c0, 0 -- write the CP15 control register */
		retval = armv7a->arm.mcr(target, 15,
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				cortex_a->cp15_control_reg);
	}
	return retval;
}
100
/*
 * Set up ARM core for memory access.
 * If !phys_access, switch to SVC mode and make sure MMU is on
 * If phys_access, switch off mmu
 */
static int cortex_a_prep_memaccess(struct target *target, int phys_access)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	int mmu_enabled = 0;

	if (phys_access == 0) {
		/* Virtual access: run in SVC mode with the MMU on (if it was
		 * on when the target stopped), so translations match the OS view. */
		dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 1);
		if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
			/* overwrite DACR to all-manager */
			armv7a->arm.mcr(target, 15,
					0, 0, 3, 0,
					0xFFFFFFFF);
		}
	} else {
		/* Physical access: temporarily disable the MMU if it is on. */
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 0);
	}
	return ERROR_OK;
}
130
/*
 * Restore ARM core after memory access.
 * If !phys_access, switch to previous mode
 * If phys_access, restore MMU setting
 */
static int cortex_a_post_memaccess(struct target *target, int phys_access)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	if (phys_access == 0) {
		if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
			/* restore the DACR value saved before the all-manager override */
			armv7a->arm.mcr(target, 15,
					0, 0, 3, 0,
					cortex_a->cp15_dacr_reg);
		}
		/* Drop the forced SVC mode set up in cortex_a_prep_memaccess() */
		dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	} else {
		/* Physical access path: re-enable the MMU if the target had it on. */
		int mmu_enabled = 0;
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 1);
	}
	return ERROR_OK;
}
157
158
/* modify cp15_control_reg in order to enable or disable mmu for :
 * - virt2phys address conversion
 * - read or write memory in phys or virt address */
static int cortex_a_mmu_modify(struct target *target, int enable)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval = ERROR_OK;
	int need_write = 0;

	if (enable) {
		/* if mmu enabled at target stop and mmu not enable */
		/* Refuse to enable the MMU when it was off at debug entry:
		 * translation tables can't be assumed valid in that case. */
		if (!(cortex_a->cp15_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
			return ERROR_FAIL;
		}
		if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
			cortex_a->cp15_control_reg_curr |= 0x1U;
			need_write = 1;
		}
	} else {
		if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
			cortex_a->cp15_control_reg_curr &= ~0x1U;
			need_write = 1;
		}
	}

	/* Only touch CP15 when the M bit actually changed. */
	if (need_write) {
		LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
			enable ? "enable mmu" : "disable mmu",
			cortex_a->cp15_control_reg_curr);

		retval = armv7a->arm.mcr(target, 15,
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				cortex_a->cp15_control_reg_curr);
	}
	return retval;
}
198
/*
 * Cortex-A Basic debug access, very low level assumes state is saved
 */
static int cortex_a8_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	LOG_DEBUG(" ");

	/* Unlocking the debug registers for modification
	 * The debugport might be uninitialised so try twice */
	/* 0xC5ACCE55 is the unlock key for the lock access register. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
	if (retval != ERROR_OK) {
		/* try again */
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
		if (retval == ERROR_OK)
			/* NOTE(review): message says "Locking" but this sequence
			 * unlocks the registers -- kept as-is (runtime string). */
			LOG_USER(
				"Locking debug access failed on first, but succeeded on second try.");
	}

	return retval;
}
224
/*
 * Cortex-A Basic debug access, very low level assumes state is saved
 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;
	uint32_t dbg_osreg;
	uint32_t cortex_part_num;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	LOG_DEBUG(" ");
	/* Extract the part-number field from the cached MIDR/CPUID value. */
	cortex_part_num = (cortex_a->cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >>
		CORTEX_A_MIDR_PARTNUM_SHIFT;

	switch (cortex_part_num) {
	case CORTEX_A7_PARTNUM:
	case CORTEX_A15_PARTNUM:
		/* A7/A15: check the OS Lock status and clear the lock via
		 * DBGOSLAR if it is held. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_OSLSR,
				&dbg_osreg);
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("DBGOSLSR 0x%" PRIx32, dbg_osreg);

		if (dbg_osreg & CPUDBG_OSLAR_LK_MASK)
			/* Unlocking the DEBUG OS registers for modification */
			retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
					armv7a->debug_base + CPUDBG_OSLAR,
					0);
		break;

	case CORTEX_A5_PARTNUM:
	case CORTEX_A8_PARTNUM:
	case CORTEX_A9_PARTNUM:
	default:
		/* Other parts use the lock access register path. */
		retval = cortex_a8_init_debug_access(target);
	}

	if (retval != ERROR_OK)
		return retval;
	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
	LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);

	if (retval != ERROR_OK)
		return retval;

	/* Disable cacheline fills and force cache write-through in debug state */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable TLB lookup and refill/eviction in debug state */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSMCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a_poll(target);
}
295
static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
{
	/* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
	 * Writes final value of DSCR into *dscr. Pass force to force always
	 * reading DSCR at least once.
	 *
	 * NOTE: *dscr is dereferenced unconditionally, so callers must pass
	 * a valid pointer (never NULL). Times out after ~1 second. */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int64_t then = timeval_ms();
	while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
		/* 'force' guarantees at least one fresh DSCR read even if the
		 * cached value already shows InstrCompl set. */
		force = false;
		int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for InstrCompl=1");
			return ERROR_FAIL;
		}
	}
	return ERROR_OK;
}
318
319 /* To reduce needless round-trips, pass in a pointer to the current
320 * DSCR value. Initialize it to zero if you just need to know the
321 * value on return from this function; or DSCR_INSTR_COMP if you
322 * happen to know that no instruction is pending.
323 */
324 static int cortex_a_exec_opcode(struct target *target,
325 uint32_t opcode, uint32_t *dscr_p)
326 {
327 uint32_t dscr;
328 int retval;
329 struct armv7a_common *armv7a = target_to_armv7a(target);
330
331 dscr = dscr_p ? *dscr_p : 0;
332
333 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
334
335 /* Wait for InstrCompl bit to be set */
336 retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
337 if (retval != ERROR_OK)
338 return retval;
339
340 retval = mem_ap_write_u32(armv7a->debug_ap,
341 armv7a->debug_base + CPUDBG_ITR, opcode);
342 if (retval != ERROR_OK)
343 return retval;
344
345 int64_t then = timeval_ms();
346 do {
347 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
348 armv7a->debug_base + CPUDBG_DSCR, &dscr);
349 if (retval != ERROR_OK) {
350 LOG_ERROR("Could not read DSCR register");
351 return retval;
352 }
353 if (timeval_ms() > then + 1000) {
354 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
355 return ERROR_FAIL;
356 }
357 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
358
359 if (dscr_p)
360 *dscr_p = dscr;
361
362 return retval;
363 }
364
/**************************************************************************
Read core register with very few exec_opcode, fast but needs work_area.
This can cause problems with MMU active.
**************************************************************************/
static int cortex_a_read_regs_through_mem(struct target *target, uint32_t address,
	uint32_t *regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Save R0 into regfile[0], point R0 at the work area... */
	retval = cortex_a_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	/* ...then store R1-R15 to memory in one STMIA (mask 0xFFFE skips R0). */
	retval = cortex_a_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;

	/* Read the 15 stored words back through the memory AP. */
	retval = mem_ap_read_buf(armv7a->memory_ap,
			(uint8_t *)(&regfile[1]), 4, 15, address);

	return retval;
}
390
/* Read core register @regnum (0-14 = Rn, 15 = PC, 16/17 = CPSR/SPSR)
 * via the DCC channel into *value.  Assumes the core is in debug state. */
static int cortex_a_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Silently accept only register numbers 0..17. */
	if (reg > 17)
		return retval;

	if (reg < 15) {
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0"  0xEE00nE15 */
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else if (reg == 15) {
		/* "MOV r0, r15"; then move r0 to DCCTX */
		retval = cortex_a_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* "MRS r0, CPSR" or "MRS r0, SPSR"
		 * then move r0 to DCCTX
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX */
	int64_t then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
452
/* Write @value to core register @regnum (0-14 = Rn, 15 = PC,
 * 16/17 = CPSR/SPSR) via the DCC channel.  Assumes debug state. */
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode  0xEE100E15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Silently accept only register numbers 0..17. */
	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15) {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	} else if (Rd == 15) {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16) {
			retval = cortex_a_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
530
531 /* Write to memory mapped registers directly with no cache or mmu handling */
532 static int cortex_a_dap_write_memap_register_u32(struct target *target,
533 uint32_t address,
534 uint32_t value)
535 {
536 int retval;
537 struct armv7a_common *armv7a = target_to_armv7a(target);
538
539 retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
540
541 return retval;
542 }
543
544 /*
545 * Cortex-A implementation of Debug Programmer's Model
546 *
547 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
548 * so there's no need to poll for it before executing an instruction.
549 *
550 * NOTE that in several of these cases the "stall" mode might be useful.
551 * It'd let us queue a few operations together... prepare/finish might
552 * be the places to enable/disable that mode.
553 */
554
/* Recover the enclosing cortex_a_common from its embedded arm_dpm. */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
559
/* Push one word into the DCC by writing the DTRRX register. */
static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
{
	LOG_DEBUG("write DCC 0x%08" PRIx32, data);
	return mem_ap_write_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
}
566
/* Pull one word from the DCC: wait for DSCR.DTRTXfull (1 s timeout),
 * then read DTRTX into *data.  @dscr_p optionally caches DSCR across calls. */
static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	int64_t then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
601
/* DPM prepare hook: establish the invariant that DSCR.InstrCompl is set
 * (no instruction pending) and the DCC receive side is empty before the
 * generic DPM code starts queueing instructions. */
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant:  INSTR_COMP is set after ever DPM operation */
	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by draining it into R0 (MRC p14). */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
638
639 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
640 {
641 /* REVISIT what could be done here? */
642 return ERROR_OK;
643 }
644
/* DPM hook: place @data in the DCC, then execute @opcode, which is
 * expected to consume the word directly from the DCC. */
static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	int retval;
	uint32_t dscr = DSCR_INSTR_COMP;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
}
661
/* DPM hook: load @data into R0 via the DCC, then execute @opcode,
 * which is expected to take its operand from R0. */
static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);

	return retval;
}
689
/* DPM hook: issue an ISB-equivalent (CP15 prefetch flush) so that a
 * preceding CPSR modification takes effect before further instructions. */
static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* "Prefetch flush" after modifying execution status in CPSR */
	return cortex_a_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
			&dscr);
}
700
/* DPM hook: execute @opcode, which is expected to write a word to the
 * DCC, then read that word back into *data. */
static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	int retval;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* the opcode, writing data to DCC */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}
718
719
/* DPM hook: execute @opcode, which is expected to leave its result in
 * R0, then transfer R0 to the DCC and read it into *data. */
static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* the opcode, writing data to R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}
745
/* DPM hook: program breakpoint (index 0-15) or watchpoint (index 16-31)
 * unit @index_t with value register @addr and control register @control. */
static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t vr = a->armv7a_common.debug_base;
	uint32_t cr = a->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
		case 0 ... 15:	/* breakpoints */
			vr += CPUDBG_BVR_BASE;
			cr += CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:	/* watchpoints */
			vr += CPUDBG_WVR_BASE;
			cr += CPUDBG_WCR_BASE;
			/* Re-base the index into the watchpoint register file. */
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	/* Registers are word-indexed within each bank. */
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
781
/* DPM hook: disable breakpoint (index 0-15) or watchpoint (index 16-31)
 * unit @index_t by clearing its control register. */
static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t cr;

	switch (index_t) {
		case 0 ... 15:
			cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:
			cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);

	/* clear control register */
	return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
}
805
/* Wire up the Cortex-A implementations of the generic arm_dpm hooks and
 * run the common DPM setup/initialize sequence.  @didr is the debug ID
 * register value used by the generic layer. */
static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
{
	struct arm_dpm *dpm = &a->armv7a_common.dpm;
	int retval;

	dpm->arm = &a->armv7a_common.arm;
	dpm->didr = didr;

	dpm->prepare = cortex_a_dpm_prepare;
	dpm->finish = cortex_a_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a_bpwp_enable;
	dpm->bpwp_disable = cortex_a_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
833 static struct target *get_cortex_a(struct target *target, int32_t coreid)
834 {
835 struct target_list *head;
836 struct target *curr;
837
838 head = target->head;
839 while (head != (struct target_list *)NULL) {
840 curr = head->target;
841 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
842 return curr;
843 head = head->next;
844 }
845 return target;
846 }
847 static int cortex_a_halt(struct target *target);
848
849 static int cortex_a_halt_smp(struct target *target)
850 {
851 int retval = 0;
852 struct target_list *head;
853 struct target *curr;
854 head = target->head;
855 while (head != (struct target_list *)NULL) {
856 curr = head->target;
857 if ((curr != target) && (curr->state != TARGET_HALTED))
858 retval += cortex_a_halt(curr);
859 head = head->next;
860 }
861 return retval;
862 }
863
864 static int update_halt_gdb(struct target *target)
865 {
866 int retval = 0;
867 if (target->gdb_service && target->gdb_service->core[0] == -1) {
868 target->gdb_service->target = target;
869 target->gdb_service->core[0] = target->coreid;
870 retval += cortex_a_halt_smp(target);
871 }
872 return retval;
873 }
874
/*
 * Cortex-A Run control
 */

/* Poll DSCR and update target->state; on a new halt, run debug entry,
 * propagate the halt to SMP siblings and fire gdb halt events. */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* Cache the raw DSCR for other code paths to inspect. */
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
948
/* Request a halt via DRCR, enable halting debug mode in DSCR, then wait
 * (1 s timeout) for DSCR to report the core halted. */
static int cortex_a_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enter halting debug mode
	 */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read-modify-write: only the halting-debug-mode bit is added. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_HALTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
995
/* Prepare the core for resume: compute/fix up the resume PC (current=1
 * keeps the current PC, otherwise *address is used and written back),
 * restore CP15 control and the register context, and mark the target
 * running.  Does NOT actually restart the core -- see
 * cortex_a_internal_restart() for that. */
static int cortex_a_internal_restore(struct target *target, int current,
	uint32_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM state: PC must be word aligned. */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
1090
/*
 * Restart a core previously prepared by cortex_a_internal_restore().
 *
 * Clears ITRen and sticky exception flags (required before leaving debug
 * state, see ARMv7 ARM C5.9), requests the restart through DRCR, then
 * polls DSCR (up to 1 s) until the core reports it has restarted.
 */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* The last instruction issued through the ITR must have completed;
	 * restarting anyway is a debug-protocol violation, so warn loudly. */
	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* Disable the Instruction Transfer Register before restarting. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	/* Request the restart and clear any sticky exception flags. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Poll until the core acknowledges the restart, 1 s timeout. */
	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1146
1147 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
1148 {
1149 int retval = 0;
1150 struct target_list *head;
1151 struct target *curr;
1152 uint32_t address;
1153 head = target->head;
1154 while (head != (struct target_list *)NULL) {
1155 curr = head->target;
1156 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1157 /* resume current address , not in step mode */
1158 retval += cortex_a_internal_restore(curr, 1, &address,
1159 handle_breakpoints, 0);
1160 retval += cortex_a_internal_restart(curr);
1161 }
1162 head = head->next;
1163
1164 }
1165 return retval;
1166 }
1167
1168 static int cortex_a_resume(struct target *target, int current,
1169 uint32_t address, int handle_breakpoints, int debug_execution)
1170 {
1171 int retval = 0;
1172 /* dummy resume for smp toggle in order to reduce gdb impact */
1173 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1174 /* simulate a start and halt of target */
1175 target->gdb_service->target = NULL;
1176 target->gdb_service->core[0] = target->gdb_service->core[1];
1177 /* fake resume at next poll we play the target core[1], see poll*/
1178 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1179 return 0;
1180 }
1181 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1182 if (target->smp) {
1183 target->gdb_service->core[0] = -1;
1184 retval = cortex_a_restore_smp(target, handle_breakpoints);
1185 if (retval != ERROR_OK)
1186 return retval;
1187 }
1188 cortex_a_internal_restart(target);
1189
1190 if (!debug_execution) {
1191 target->state = TARGET_RUNNING;
1192 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1193 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1194 } else {
1195 target->state = TARGET_DEBUG_RUNNING;
1196 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1197 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1198 }
1199
1200 return ERROR_OK;
1201 }
1202
/*
 * Handle entry into debug state: enable the ITR, determine the debug
 * reason from the cached DSCR, snapshot the core registers (via the DPM,
 * or through a working area on the fast path) and finally invoke the
 * architecture post-debug-entry hook.
 */
static int cortex_a_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason: decodes DSCR and sets target->debug_reason. */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load registers accessible through the core debug port */
	if (!regfile_working_area)
		/* Normal (slow) path: read registers one by one via the DPM. */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	else {
		/* Fast path: have the core dump r0-r15 into the working area
		 * and read them back in one memory transfer. */
		retval = cortex_a_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
			return retval;

		/* read Current PSR */
		retval = cortex_a_dap_read_coreregister_u32(target, &cpsr, 16);
		/* store current cpsr */
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(arm, cpsr);

		/* update cache: mark the freshly read r0..pc valid and clean */
		for (i = 0; i <= ARM_PC; i++) {
			reg = arm_reg_current(arm, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: undo the pipeline offset the core
		 * reports (PC reads ahead of the executing instruction). */
		if (cpsr & (1 << 5)) {
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		} else {
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = arm->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1322
1323 static int cortex_a_post_debug_entry(struct target *target)
1324 {
1325 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1326 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1327 int retval;
1328
1329 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1330 retval = armv7a->arm.mrc(target, 15,
1331 0, 0, /* op1, op2 */
1332 1, 0, /* CRn, CRm */
1333 &cortex_a->cp15_control_reg);
1334 if (retval != ERROR_OK)
1335 return retval;
1336 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1337 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1338
1339 if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1340 armv7a_identify_cache(target);
1341
1342 if (armv7a->is_armv7r) {
1343 armv7a->armv7a_mmu.mmu_enabled = 0;
1344 } else {
1345 armv7a->armv7a_mmu.mmu_enabled =
1346 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1347 }
1348 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1349 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1350 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1351 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1352 cortex_a->curr_mode = armv7a->arm.core_mode;
1353
1354 /* switch to SVC mode to read DACR */
1355 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1356 armv7a->arm.mrc(target, 15,
1357 0, 0, 3, 0,
1358 &cortex_a->cp15_dacr_reg);
1359
1360 LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1361 cortex_a->cp15_dacr_reg);
1362
1363 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1364 return ERROR_OK;
1365 }
1366
1367 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1368 {
1369 struct armv7a_common *armv7a = target_to_armv7a(target);
1370 uint32_t dscr;
1371
1372 /* Read DSCR */
1373 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1374 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1375 if (ERROR_OK != retval)
1376 return retval;
1377
1378 /* clear bitfield */
1379 dscr &= ~bit_mask;
1380 /* put new value */
1381 dscr |= value & bit_mask;
1382
1383 /* write new DSCR */
1384 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1385 armv7a->debug_base + CPUDBG_DSCR, dscr);
1386 return retval;
1387 }
1388
1389 static int cortex_a_step(struct target *target, int current, uint32_t address,
1390 int handle_breakpoints)
1391 {
1392 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1393 struct armv7a_common *armv7a = target_to_armv7a(target);
1394 struct arm *arm = &armv7a->arm;
1395 struct breakpoint *breakpoint = NULL;
1396 struct breakpoint stepbreakpoint;
1397 struct reg *r;
1398 int retval;
1399
1400 if (target->state != TARGET_HALTED) {
1401 LOG_WARNING("target not halted");
1402 return ERROR_TARGET_NOT_HALTED;
1403 }
1404
1405 /* current = 1: continue on current pc, otherwise continue at <address> */
1406 r = arm->pc;
1407 if (!current)
1408 buf_set_u32(r->value, 0, 32, address);
1409 else
1410 address = buf_get_u32(r->value, 0, 32);
1411
1412 /* The front-end may request us not to handle breakpoints.
1413 * But since Cortex-A uses breakpoint for single step,
1414 * we MUST handle breakpoints.
1415 */
1416 handle_breakpoints = 1;
1417 if (handle_breakpoints) {
1418 breakpoint = breakpoint_find(target, address);
1419 if (breakpoint)
1420 cortex_a_unset_breakpoint(target, breakpoint);
1421 }
1422
1423 /* Setup single step breakpoint */
1424 stepbreakpoint.address = address;
1425 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1426 ? 2 : 4;
1427 stepbreakpoint.type = BKPT_HARD;
1428 stepbreakpoint.set = 0;
1429
1430 /* Disable interrupts during single step if requested */
1431 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1432 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1433 if (ERROR_OK != retval)
1434 return retval;
1435 }
1436
1437 /* Break on IVA mismatch */
1438 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1439
1440 target->debug_reason = DBG_REASON_SINGLESTEP;
1441
1442 retval = cortex_a_resume(target, 1, address, 0, 0);
1443 if (retval != ERROR_OK)
1444 return retval;
1445
1446 int64_t then = timeval_ms();
1447 while (target->state != TARGET_HALTED) {
1448 retval = cortex_a_poll(target);
1449 if (retval != ERROR_OK)
1450 return retval;
1451 if (timeval_ms() > then + 1000) {
1452 LOG_ERROR("timeout waiting for target halt");
1453 return ERROR_FAIL;
1454 }
1455 }
1456
1457 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1458
1459 /* Re-enable interrupts if they were disabled */
1460 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1461 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1462 if (ERROR_OK != retval)
1463 return retval;
1464 }
1465
1466
1467 target->debug_reason = DBG_REASON_BREAKPOINT;
1468
1469 if (breakpoint)
1470 cortex_a_set_breakpoint(target, breakpoint, 0);
1471
1472 if (target->state != TARGET_HALTED)
1473 LOG_DEBUG("target stepped");
1474
1475 return ERROR_OK;
1476 }
1477
1478 static int cortex_a_restore_context(struct target *target, bool bpwp)
1479 {
1480 struct armv7a_common *armv7a = target_to_armv7a(target);
1481
1482 LOG_DEBUG(" ");
1483
1484 if (armv7a->pre_restore_context)
1485 armv7a->pre_restore_context(target);
1486
1487 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1488 }
1489
1490 /*
1491 * Cortex-A Breakpoint and watchpoint functions
1492 */
1493
1494 /* Setup hardware Breakpoint Register Pair */
1495 static int cortex_a_set_breakpoint(struct target *target,
1496 struct breakpoint *breakpoint, uint8_t matchmode)
1497 {
1498 int retval;
1499 int brp_i = 0;
1500 uint32_t control;
1501 uint8_t byte_addr_select = 0x0F;
1502 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1503 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1504 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1505
1506 if (breakpoint->set) {
1507 LOG_WARNING("breakpoint already set");
1508 return ERROR_OK;
1509 }
1510
1511 if (breakpoint->type == BKPT_HARD) {
1512 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1513 brp_i++;
1514 if (brp_i >= cortex_a->brp_num) {
1515 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1516 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1517 }
1518 breakpoint->set = brp_i + 1;
1519 if (breakpoint->length == 2)
1520 byte_addr_select = (3 << (breakpoint->address & 0x02));
1521 control = ((matchmode & 0x7) << 20)
1522 | (byte_addr_select << 5)
1523 | (3 << 1) | 1;
1524 brp_list[brp_i].used = 1;
1525 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1526 brp_list[brp_i].control = control;
1527 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1528 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1529 brp_list[brp_i].value);
1530 if (retval != ERROR_OK)
1531 return retval;
1532 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1533 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1534 brp_list[brp_i].control);
1535 if (retval != ERROR_OK)
1536 return retval;
1537 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1538 brp_list[brp_i].control,
1539 brp_list[brp_i].value);
1540 } else if (breakpoint->type == BKPT_SOFT) {
1541 uint8_t code[4];
1542 if (breakpoint->length == 2)
1543 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1544 else
1545 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1546 retval = target_read_memory(target,
1547 breakpoint->address & 0xFFFFFFFE,
1548 breakpoint->length, 1,
1549 breakpoint->orig_instr);
1550 if (retval != ERROR_OK)
1551 return retval;
1552
1553 /* make sure data cache is cleaned & invalidated down to PoC */
1554 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1555 armv7a_cache_flush_virt(target, breakpoint->address,
1556 breakpoint->length);
1557 }
1558
1559 retval = target_write_memory(target,
1560 breakpoint->address & 0xFFFFFFFE,
1561 breakpoint->length, 1, code);
1562 if (retval != ERROR_OK)
1563 return retval;
1564
1565 /* update i-cache at breakpoint location */
1566 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1567 breakpoint->length);
1568 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1569 breakpoint->length);
1570
1571 breakpoint->set = 0x11; /* Any nice value but 0 */
1572 }
1573
1574 return ERROR_OK;
1575 }
1576
1577 static int cortex_a_set_context_breakpoint(struct target *target,
1578 struct breakpoint *breakpoint, uint8_t matchmode)
1579 {
1580 int retval = ERROR_FAIL;
1581 int brp_i = 0;
1582 uint32_t control;
1583 uint8_t byte_addr_select = 0x0F;
1584 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1585 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1586 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1587
1588 if (breakpoint->set) {
1589 LOG_WARNING("breakpoint already set");
1590 return retval;
1591 }
1592 /*check available context BRPs*/
1593 while ((brp_list[brp_i].used ||
1594 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1595 brp_i++;
1596
1597 if (brp_i >= cortex_a->brp_num) {
1598 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1599 return ERROR_FAIL;
1600 }
1601
1602 breakpoint->set = brp_i + 1;
1603 control = ((matchmode & 0x7) << 20)
1604 | (byte_addr_select << 5)
1605 | (3 << 1) | 1;
1606 brp_list[brp_i].used = 1;
1607 brp_list[brp_i].value = (breakpoint->asid);
1608 brp_list[brp_i].control = control;
1609 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1610 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1611 brp_list[brp_i].value);
1612 if (retval != ERROR_OK)
1613 return retval;
1614 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1615 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1616 brp_list[brp_i].control);
1617 if (retval != ERROR_OK)
1618 return retval;
1619 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1620 brp_list[brp_i].control,
1621 brp_list[brp_i].value);
1622 return ERROR_OK;
1623
1624 }
1625
1626 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1627 {
1628 int retval = ERROR_FAIL;
1629 int brp_1 = 0; /* holds the contextID pair */
1630 int brp_2 = 0; /* holds the IVA pair */
1631 uint32_t control_CTX, control_IVA;
1632 uint8_t CTX_byte_addr_select = 0x0F;
1633 uint8_t IVA_byte_addr_select = 0x0F;
1634 uint8_t CTX_machmode = 0x03;
1635 uint8_t IVA_machmode = 0x01;
1636 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1637 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1638 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1639
1640 if (breakpoint->set) {
1641 LOG_WARNING("breakpoint already set");
1642 return retval;
1643 }
1644 /*check available context BRPs*/
1645 while ((brp_list[brp_1].used ||
1646 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1647 brp_1++;
1648
1649 printf("brp(CTX) found num: %d\n", brp_1);
1650 if (brp_1 >= cortex_a->brp_num) {
1651 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1652 return ERROR_FAIL;
1653 }
1654
1655 while ((brp_list[brp_2].used ||
1656 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1657 brp_2++;
1658
1659 printf("brp(IVA) found num: %d\n", brp_2);
1660 if (brp_2 >= cortex_a->brp_num) {
1661 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1662 return ERROR_FAIL;
1663 }
1664
1665 breakpoint->set = brp_1 + 1;
1666 breakpoint->linked_BRP = brp_2;
1667 control_CTX = ((CTX_machmode & 0x7) << 20)
1668 | (brp_2 << 16)
1669 | (0 << 14)
1670 | (CTX_byte_addr_select << 5)
1671 | (3 << 1) | 1;
1672 brp_list[brp_1].used = 1;
1673 brp_list[brp_1].value = (breakpoint->asid);
1674 brp_list[brp_1].control = control_CTX;
1675 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1676 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1677 brp_list[brp_1].value);
1678 if (retval != ERROR_OK)
1679 return retval;
1680 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1681 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1682 brp_list[brp_1].control);
1683 if (retval != ERROR_OK)
1684 return retval;
1685
1686 control_IVA = ((IVA_machmode & 0x7) << 20)
1687 | (brp_1 << 16)
1688 | (IVA_byte_addr_select << 5)
1689 | (3 << 1) | 1;
1690 brp_list[brp_2].used = 1;
1691 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1692 brp_list[brp_2].control = control_IVA;
1693 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1694 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1695 brp_list[brp_2].value);
1696 if (retval != ERROR_OK)
1697 return retval;
1698 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1699 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1700 brp_list[brp_2].control);
1701 if (retval != ERROR_OK)
1702 return retval;
1703
1704 return ERROR_OK;
1705 }
1706
/*
 * Remove a previously-set breakpoint from the core.
 *
 * Hardware breakpoints: clears and writes back the BRP (or the linked
 * BRP pair for hybrid context+IVA breakpoints, identified by both
 * address and asid being non-zero).  Software breakpoints: restores the
 * saved original instruction and keeps the caches coherent.
 */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* hybrid breakpoint: both an address and an ASID were set,
		 * so two linked BRPs must be released */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;	/* context BRP */
			int brp_j = breakpoint->linked_BRP;	/* IVA BRP */
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* clear bookkeeping and write zeroed BCR/BVR back */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			/* release the linked IVA BRP the same way */
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: release its single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
						breakpoint->length);
		}

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
					breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
					breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1822
1823 static int cortex_a_add_breakpoint(struct target *target,
1824 struct breakpoint *breakpoint)
1825 {
1826 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1827
1828 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1829 LOG_INFO("no hardware breakpoint available");
1830 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1831 }
1832
1833 if (breakpoint->type == BKPT_HARD)
1834 cortex_a->brp_num_available--;
1835
1836 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1837 }
1838
1839 static int cortex_a_add_context_breakpoint(struct target *target,
1840 struct breakpoint *breakpoint)
1841 {
1842 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1843
1844 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1845 LOG_INFO("no hardware breakpoint available");
1846 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1847 }
1848
1849 if (breakpoint->type == BKPT_HARD)
1850 cortex_a->brp_num_available--;
1851
1852 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1853 }
1854
1855 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1856 struct breakpoint *breakpoint)
1857 {
1858 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1859
1860 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1861 LOG_INFO("no hardware breakpoint available");
1862 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1863 }
1864
1865 if (breakpoint->type == BKPT_HARD)
1866 cortex_a->brp_num_available--;
1867
1868 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1869 }
1870
1871
1872 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1873 {
1874 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1875
1876 #if 0
1877 /* It is perfectly possible to remove breakpoints while the target is running */
1878 if (target->state != TARGET_HALTED) {
1879 LOG_WARNING("target not halted");
1880 return ERROR_TARGET_NOT_HALTED;
1881 }
1882 #endif
1883
1884 if (breakpoint->set) {
1885 cortex_a_unset_breakpoint(target, breakpoint);
1886 if (breakpoint->type == BKPT_HARD)
1887 cortex_a->brp_num_available++;
1888 }
1889
1890
1891 return ERROR_OK;
1892 }
1893
1894 /*
1895 * Cortex-A Reset functions
1896 */
1897
/*
 * Assert reset on the target: prefer a user-supplied RESET_ASSERT event
 * handler; otherwise fall back to SRST via the JTAG adapter.  Fails if
 * neither mechanism is available.
 */
static int cortex_a_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		jtag_add_reset(0, 1);
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	register_cache_invalidate(armv7a->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1926
1927 static int cortex_a_deassert_reset(struct target *target)
1928 {
1929 int retval;
1930
1931 LOG_DEBUG(" ");
1932
1933 /* be certain SRST is off */
1934 jtag_add_reset(0, 0);
1935
1936 retval = cortex_a_poll(target);
1937 if (retval != ERROR_OK)
1938 return retval;
1939
1940 if (target->reset_halt) {
1941 if (target->state != TARGET_HALTED) {
1942 LOG_WARNING("%s: ran after reset and before halt ...",
1943 target_name(target));
1944 retval = target_halt(target);
1945 if (retval != ERROR_OK)
1946 return retval;
1947 }
1948 }
1949
1950 return ERROR_OK;
1951 }
1952
1953 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1954 {
1955 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1956 * New desired mode must be in mode. Current value of DSCR must be in
1957 * *dscr, which is updated with new value.
1958 *
1959 * This function elides actually sending the mode-change over the debug
1960 * interface if the mode is already set as desired.
1961 */
1962 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1963 if (new_dscr != *dscr) {
1964 struct armv7a_common *armv7a = target_to_armv7a(target);
1965 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1966 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1967 if (retval == ERROR_OK)
1968 *dscr = new_dscr;
1969 return retval;
1970 } else {
1971 return ERROR_OK;
1972 }
1973 }
1974
1975 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1976 uint32_t value, uint32_t *dscr)
1977 {
1978 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1979 struct armv7a_common *armv7a = target_to_armv7a(target);
1980 int64_t then = timeval_ms();
1981 int retval;
1982
1983 while ((*dscr & mask) != value) {
1984 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1985 armv7a->debug_base + CPUDBG_DSCR, dscr);
1986 if (retval != ERROR_OK)
1987 return retval;
1988 if (timeval_ms() > then + 1000) {
1989 LOG_ERROR("timeout waiting for DSCR bit change");
1990 return ERROR_FAIL;
1991 }
1992 }
1993 return ERROR_OK;
1994 }
1995
/*
 * Read a coprocessor register while halted: execute `opcode` (an MRC
 * that must target R0), move R0 into DTRTX, then read DTRTX over the
 * debug AP.  Caller supplies/receives the cached DSCR through *dscr.
 */
static int cortex_a_read_copro(struct target *target, uint32_t opcode,
	uint32_t *data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Move from coprocessor to R0. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
	 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
	 * must also check TXfull_l). Most of the time this will be free
	 * because TXfull_l will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2029
2030 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
2031 uint32_t *dfsr, uint32_t *dscr)
2032 {
2033 int retval;
2034
2035 if (dfar) {
2036 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
2037 if (retval != ERROR_OK)
2038 return retval;
2039 }
2040
2041 if (dfsr) {
2042 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2043 if (retval != ERROR_OK)
2044 return retval;
2045 }
2046
2047 return ERROR_OK;
2048 }
2049
2050 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
2051 uint32_t data, uint32_t *dscr)
2052 {
2053 int retval;
2054 struct armv7a_common *armv7a = target_to_armv7a(target);
2055
2056 /* Write the value into DTRRX. */
2057 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2058 armv7a->debug_base + CPUDBG_DTRRX, data);
2059 if (retval != ERROR_OK)
2060 return retval;
2061
2062 /* Move from DTRRX to R0. */
2063 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
2064 if (retval != ERROR_OK)
2065 return retval;
2066
2067 /* Move from R0 to coprocessor. */
2068 retval = cortex_a_exec_opcode(target, opcode, dscr);
2069 if (retval != ERROR_OK)
2070 return retval;
2071
2072 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2073 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2074 * check RXfull_l). Most of the time this will be free because RXfull_l
2075 * will be cleared immediately and cached in dscr. */
2076 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2077 if (retval != ERROR_OK)
2078 return retval;
2079
2080 return ERROR_OK;
2081 }
2082
2083 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2084 uint32_t dfsr, uint32_t *dscr)
2085 {
2086 int retval;
2087
2088 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2089 if (retval != ERROR_OK)
2090 return retval;
2091
2092 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2093 if (retval != ERROR_OK)
2094 return retval;
2095
2096 return ERROR_OK;
2097 }
2098
2099 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2100 {
2101 uint32_t status, upper4;
2102
2103 if (dfsr & (1 << 9)) {
2104 /* LPAE format. */
2105 status = dfsr & 0x3f;
2106 upper4 = status >> 2;
2107 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2108 return ERROR_TARGET_TRANSLATION_FAULT;
2109 else if (status == 33)
2110 return ERROR_TARGET_UNALIGNED_ACCESS;
2111 else
2112 return ERROR_TARGET_DATA_ABORT;
2113 } else {
2114 /* Normal format. */
2115 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2116 if (status == 1)
2117 return ERROR_TARGET_UNALIGNED_ACCESS;
2118 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2119 status == 9 || status == 11 || status == 13 || status == 15)
2120 return ERROR_TARGET_TRANSLATION_FAULT;
2121 else
2122 return ERROR_TARGET_DATA_ABORT;
2123 }
2124 }
2125
2126 static int cortex_a_write_apb_ab_memory_slow(struct target *target,
2127 uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2128 {
2129 /* Writes count objects of size size from *buffer. Old value of DSCR must
2130 * be in *dscr; updated to new value. This is slow because it works for
2131 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
2132 * the address is aligned, cortex_a_write_apb_ab_memory_fast should be
2133 * preferred.
2134 * Preconditions:
2135 * - Address is in R0.
2136 * - R0 is marked dirty.
2137 */
2138 struct armv7a_common *armv7a = target_to_armv7a(target);
2139 struct arm *arm = &armv7a->arm;
2140 int retval;
2141
2142 /* Mark register R1 as dirty, to use for transferring data. */
2143 arm_reg_current(arm, 1)->dirty = true;
2144
2145 /* Switch to non-blocking mode if not already in that mode. */
2146 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2147 if (retval != ERROR_OK)
2148 return retval;
2149
2150 /* Go through the objects. */
2151 while (count) {
2152 /* Write the value to store into DTRRX. */
2153 uint32_t data, opcode;
2154 if (size == 1)
2155 data = *buffer;
2156 else if (size == 2)
2157 data = target_buffer_get_u16(target, buffer);
2158 else
2159 data = target_buffer_get_u32(target, buffer);
2160 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2161 armv7a->debug_base + CPUDBG_DTRRX, data);
2162 if (retval != ERROR_OK)
2163 return retval;
2164
2165 /* Transfer the value from DTRRX to R1. */
2166 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
2167 if (retval != ERROR_OK)
2168 return retval;
2169
2170 /* Write the value transferred to R1 into memory. */
2171 if (size == 1)
2172 opcode = ARMV4_5_STRB_IP(1, 0);
2173 else if (size == 2)
2174 opcode = ARMV4_5_STRH_IP(1, 0);
2175 else
2176 opcode = ARMV4_5_STRW_IP(1, 0);
2177 retval = cortex_a_exec_opcode(target, opcode, dscr);
2178 if (retval != ERROR_OK)
2179 return retval;
2180
2181 /* Check for faults and return early. */
2182 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2183 return ERROR_OK; /* A data fault is not considered a system failure. */
2184
2185 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
2186 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2187 * must also check RXfull_l). Most of the time this will be free
2188 * because RXfull_l will be cleared immediately and cached in dscr. */
2189 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2190 if (retval != ERROR_OK)
2191 return retval;
2192
2193 /* Advance. */
2194 buffer += size;
2195 --count;
2196 }
2197
2198 return ERROR_OK;
2199 }
2200
2201 static int cortex_a_write_apb_ab_memory_fast(struct target *target,
2202 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2203 {
2204 /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
2205 * in *dscr; updated to new value. This is fast but only works for
2206 * word-sized objects at aligned addresses.
2207 * Preconditions:
2208 * - Address is in R0 and must be a multiple of 4.
2209 * - R0 is marked dirty.
2210 */
2211 struct armv7a_common *armv7a = target_to_armv7a(target);
2212 int retval;
2213
2214 /* Switch to fast mode if not already in that mode. */
2215 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2216 if (retval != ERROR_OK)
2217 return retval;
2218
2219 /* Latch STC instruction. */
2220 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2221 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2222 if (retval != ERROR_OK)
2223 return retval;
2224
2225 /* Transfer all the data and issue all the instructions. */
2226 return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
2227 4, count, armv7a->debug_base + CPUDBG_DTRRX);
2228 }
2229
2230 static int cortex_a_write_apb_ab_memory(struct target *target,
2231 uint32_t address, uint32_t size,
2232 uint32_t count, const uint8_t *buffer)
2233 {
2234 /* Write memory through APB-AP. */
2235 int retval, final_retval;
2236 struct armv7a_common *armv7a = target_to_armv7a(target);
2237 struct arm *arm = &armv7a->arm;
2238 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2239
2240 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2241 address, size, count);
2242 if (target->state != TARGET_HALTED) {
2243 LOG_WARNING("target not halted");
2244 return ERROR_TARGET_NOT_HALTED;
2245 }
2246
2247 if (!count)
2248 return ERROR_OK;
2249
2250 /* Clear any abort. */
2251 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2252 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2253 if (retval != ERROR_OK)
2254 return retval;
2255
2256 /* Read DSCR. */
2257 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2258 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2259 if (retval != ERROR_OK)
2260 return retval;
2261
2262 /* Switch to non-blocking mode if not already in that mode. */
2263 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2264 if (retval != ERROR_OK)
2265 goto out;
2266
2267 /* Mark R0 as dirty. */
2268 arm_reg_current(arm, 0)->dirty = true;
2269
2270 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2271 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2272 if (retval != ERROR_OK)
2273 goto out;
2274
2275 /* Get the memory address into R0. */
2276 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2277 armv7a->debug_base + CPUDBG_DTRRX, address);
2278 if (retval != ERROR_OK)
2279 goto out;
2280 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2281 if (retval != ERROR_OK)
2282 goto out;
2283
2284 if (size == 4 && (address % 4) == 0) {
2285 /* We are doing a word-aligned transfer, so use fast mode. */
2286 retval = cortex_a_write_apb_ab_memory_fast(target, count, buffer, &dscr);
2287 } else {
2288 /* Use slow path. */
2289 retval = cortex_a_write_apb_ab_memory_slow(target, size, count, buffer, &dscr);
2290 }
2291
2292 out:
2293 final_retval = retval;
2294
2295 /* Switch to non-blocking mode if not already in that mode. */
2296 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2297 if (final_retval == ERROR_OK)
2298 final_retval = retval;
2299
2300 /* Wait for last issued instruction to complete. */
2301 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2302 if (final_retval == ERROR_OK)
2303 final_retval = retval;
2304
2305 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2306 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2307 * check RXfull_l). Most of the time this will be free because RXfull_l
2308 * will be cleared immediately and cached in dscr. However, don't do this
2309 * if there is fault, because then the instruction might not have completed
2310 * successfully. */
2311 if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2312 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2313 if (retval != ERROR_OK)
2314 return retval;
2315 }
2316
2317 /* If there were any sticky abort flags, clear them. */
2318 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2319 fault_dscr = dscr;
2320 mem_ap_write_atomic_u32(armv7a->debug_ap,
2321 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2322 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2323 } else {
2324 fault_dscr = 0;
2325 }
2326
2327 /* Handle synchronous data faults. */
2328 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2329 if (final_retval == ERROR_OK) {
2330 /* Final return value will reflect cause of fault. */
2331 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2332 if (retval == ERROR_OK) {
2333 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2334 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2335 } else
2336 final_retval = retval;
2337 }
2338 /* Fault destroyed DFAR/DFSR; restore them. */
2339 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2340 if (retval != ERROR_OK)
2341 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2342 }
2343
2344 /* Handle asynchronous data faults. */
2345 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2346 if (final_retval == ERROR_OK)
2347 /* No other error has been recorded so far, so keep this one. */
2348 final_retval = ERROR_TARGET_DATA_ABORT;
2349 }
2350
2351 /* If the DCC is nonempty, clear it. */
2352 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2353 uint32_t dummy;
2354 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2355 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2356 if (final_retval == ERROR_OK)
2357 final_retval = retval;
2358 }
2359 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2360 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2361 if (final_retval == ERROR_OK)
2362 final_retval = retval;
2363 }
2364
2365 /* Done. */
2366 return final_retval;
2367 }
2368
2369 static int cortex_a_read_apb_ab_memory_slow(struct target *target,
2370 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2371 {
2372 /* Reads count objects of size size into *buffer. Old value of DSCR must be
2373 * in *dscr; updated to new value. This is slow because it works for
2374 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
2375 * the address is aligned, cortex_a_read_apb_ab_memory_fast should be
2376 * preferred.
2377 * Preconditions:
2378 * - Address is in R0.
2379 * - R0 is marked dirty.
2380 */
2381 struct armv7a_common *armv7a = target_to_armv7a(target);
2382 struct arm *arm = &armv7a->arm;
2383 int retval;
2384
2385 /* Mark register R1 as dirty, to use for transferring data. */
2386 arm_reg_current(arm, 1)->dirty = true;
2387
2388 /* Switch to non-blocking mode if not already in that mode. */
2389 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2390 if (retval != ERROR_OK)
2391 return retval;
2392
2393 /* Go through the objects. */
2394 while (count) {
2395 /* Issue a load of the appropriate size to R1. */
2396 uint32_t opcode, data;
2397 if (size == 1)
2398 opcode = ARMV4_5_LDRB_IP(1, 0);
2399 else if (size == 2)
2400 opcode = ARMV4_5_LDRH_IP(1, 0);
2401 else
2402 opcode = ARMV4_5_LDRW_IP(1, 0);
2403 retval = cortex_a_exec_opcode(target, opcode, dscr);
2404 if (retval != ERROR_OK)
2405 return retval;
2406
2407 /* Issue a write of R1 to DTRTX. */
2408 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2409 if (retval != ERROR_OK)
2410 return retval;
2411
2412 /* Check for faults and return early. */
2413 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2414 return ERROR_OK; /* A data fault is not considered a system failure. */
2415
2416 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2417 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2418 * must also check TXfull_l). Most of the time this will be free
2419 * because TXfull_l will be set immediately and cached in dscr. */
2420 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2421 DSCR_DTRTX_FULL_LATCHED, dscr);
2422 if (retval != ERROR_OK)
2423 return retval;
2424
2425 /* Read the value transferred to DTRTX into the buffer. */
2426 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2427 armv7a->debug_base + CPUDBG_DTRTX, &data);
2428 if (retval != ERROR_OK)
2429 return retval;
2430 if (size == 1)
2431 *buffer = (uint8_t) data;
2432 else if (size == 2)
2433 target_buffer_set_u16(target, buffer, (uint16_t) data);
2434 else
2435 target_buffer_set_u32(target, buffer, data);
2436
2437 /* Advance. */
2438 buffer += size;
2439 --count;
2440 }
2441
2442 return ERROR_OK;
2443 }
2444
static int cortex_a_read_apb_ab_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
	 * *dscr; updated to new value. This is fast but only works for word-sized
	 * objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 *
	 * NOTE: the statement order here is part of the fast-mode DCC protocol -
	 * in fast mode, reads of DTRTX themselves re-issue the latched LDC, so
	 * do not reorder the steps below.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t u32;
	int retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Issue the LDC instruction via a write to ITR. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* The LDC above already fetches the first word, so one fewer word
	 * remains for the fast-mode bulk read; the final word is collected
	 * separately at the end of this function. */
	count--;

	if (count > 0) {
		/* Switch to fast mode if not already in that mode. */
		retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Latch LDC instruction. */
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. Due to fast
		 * mode rules, this blocks until the instruction finishes executing and
		 * then reissues the read instruction to read the next word from
		 * memory. The last read of DTRTX in this call reads the second-to-last
		 * word from memory and issues the read instruction for the last word.
		 */
		retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
				4, count, armv7a->debug_base + CPUDBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += count * 4;
	}

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, dscr, false);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Check for faults and return early. */
	if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
		return ERROR_OK; /* A data fault is not considered a system failure. */

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check TXfull_l). Most of the time this will be free because TXfull_l
	 * will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX into the buffer. This is the last
	 * word. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &u32);
	if (retval != ERROR_OK)
		return retval;
	target_buffer_set_u32(target, buffer, u32);

	return ERROR_OK;
}
2531
static int cortex_a_read_apb_ab_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through APB-AP.
	 *
	 * Stages the start address in R0, then delegates to the fast path
	 * (word-sized, word-aligned transfers) or the slow per-object path.
	 * The cleanup after the "out" label always runs: it converts any data
	 * abort raised by the transfer into an error code, restores DFAR/DFSR,
	 * and drains the DCC so subsequent users find it empty. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty - it will hold the transfer address. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_apb_ab_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_read_apb_ab_memory_slow(target, size, count, buffer, &dscr);
	}

out:
	/* Remember the first error; the cleanup below must still run. */
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2658
2659
2660 /*
2661 * Cortex-A Memory access
2662 *
 * This is the same as for Cortex-M3, but we must also use the
 * correct AP number for every access.
2665 */
2666
2667 static int cortex_a_read_phys_memory(struct target *target,
2668 uint32_t address, uint32_t size,
2669 uint32_t count, uint8_t *buffer)
2670 {
2671 struct armv7a_common *armv7a = target_to_armv7a(target);
2672 struct adiv5_dap *swjdp = armv7a->arm.dap;
2673 uint8_t apsel = swjdp->apsel;
2674 int retval;
2675
2676 if (!count || !buffer)
2677 return ERROR_COMMAND_SYNTAX_ERROR;
2678
2679 LOG_DEBUG("Reading memory at real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32,
2680 address, size, count);
2681
2682 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
2683 return mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2684
2685 /* read memory through APB-AP */
2686 cortex_a_prep_memaccess(target, 1);
2687 retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
2688 cortex_a_post_memaccess(target, 1);
2689
2690 return retval;
2691 }
2692
2693 static int cortex_a_read_memory(struct target *target, uint32_t address,
2694 uint32_t size, uint32_t count, uint8_t *buffer)
2695 {
2696 int retval;
2697
2698 /* cortex_a handles unaligned memory access */
2699 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2700 size, count);
2701
2702 cortex_a_prep_memaccess(target, 0);
2703 retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
2704 cortex_a_post_memaccess(target, 0);
2705
2706 return retval;
2707 }
2708
2709 static int cortex_a_read_memory_ahb(struct target *target, uint32_t address,
2710 uint32_t size, uint32_t count, uint8_t *buffer)
2711 {
2712 int mmu_enabled = 0;
2713 uint32_t virt, phys;
2714 int retval;
2715 struct armv7a_common *armv7a = target_to_armv7a(target);
2716 struct adiv5_dap *swjdp = armv7a->arm.dap;
2717 uint8_t apsel = swjdp->apsel;
2718
2719 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2720 return target_read_memory(target, address, size, count, buffer);
2721
2722 /* cortex_a handles unaligned memory access */
2723 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2724 size, count);
2725
2726 /* determine if MMU was enabled on target stop */
2727 if (!armv7a->is_armv7r) {
2728 retval = cortex_a_mmu(target, &mmu_enabled);
2729 if (retval != ERROR_OK)
2730 return retval;
2731 }
2732
2733 if (mmu_enabled) {
2734 virt = address;
2735 retval = cortex_a_virt2phys(target, virt, &phys);
2736 if (retval != ERROR_OK)
2737 return retval;
2738
2739 LOG_DEBUG("Reading at virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2740 virt, phys);
2741 address = phys;
2742 }
2743
2744 if (!count || !buffer)
2745 return ERROR_COMMAND_SYNTAX_ERROR;
2746
2747 retval = mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2748
2749 return retval;
2750 }
2751
2752 static int cortex_a_write_phys_memory(struct target *target,
2753 uint32_t address, uint32_t size,
2754 uint32_t count, const uint8_t *buffer)
2755 {
2756 struct armv7a_common *armv7a = target_to_armv7a(target);
2757 struct adiv5_dap *swjdp = armv7a->arm.dap;
2758 uint8_t apsel = swjdp->apsel;
2759 int retval;
2760
2761 if (!count || !buffer)
2762 return ERROR_COMMAND_SYNTAX_ERROR;
2763
2764 LOG_DEBUG("Writing memory to real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2765 size, count);
2766
2767 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
2768 return mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2769
2770 /* write memory through APB-AP */
2771 cortex_a_prep_memaccess(target, 1);
2772 retval = cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
2773 cortex_a_post_memaccess(target, 1);
2774
2775 return retval;
2776 }
2777
2778 static int cortex_a_write_memory(struct target *target, uint32_t address,
2779 uint32_t size, uint32_t count, const uint8_t *buffer)
2780 {
2781 int retval;
2782
2783 /* cortex_a handles unaligned memory access */
2784 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2785 size, count);
2786
2787 /* memory writes bypass the caches, must flush before writing */
2788 armv7a_cache_auto_flush_on_write(target, address, size * count);
2789
2790 cortex_a_prep_memaccess(target, 0);
2791 retval = cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
2792 cortex_a_post_memaccess(target, 0);
2793 return retval;
2794 }
2795
2796 static int cortex_a_write_memory_ahb(struct target *target, uint32_t address,
2797 uint32_t size, uint32_t count, const uint8_t *buffer)
2798 {
2799 int mmu_enabled = 0;
2800 uint32_t virt, phys;
2801 int retval;
2802 struct armv7a_common *armv7a = target_to_armv7a(target);
2803 struct adiv5_dap *swjdp = armv7a->arm.dap;
2804 uint8_t apsel = swjdp->apsel;
2805
2806 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2807 return target_write_memory(target, address, size, count, buffer);
2808
2809 /* cortex_a handles unaligned memory access */
2810 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2811 size, count);
2812
2813 /* determine if MMU was enabled on target stop */
2814 if (!armv7a->is_armv7r) {
2815 retval = cortex_a_mmu(target, &mmu_enabled);
2816 if (retval != ERROR_OK)
2817 return retval;
2818 }
2819
2820 if (mmu_enabled) {
2821 virt = address;
2822 retval = cortex_a_virt2phys(target, virt, &phys);
2823 if (retval != ERROR_OK)
2824 return retval;
2825
2826 LOG_DEBUG("Writing to virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2827 virt,
2828 phys);
2829 address = phys;
2830 }
2831
2832 if (!count || !buffer)
2833 return ERROR_COMMAND_SYNTAX_ERROR;
2834
2835 retval = mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2836
2837 return retval;
2838 }
2839
2840 static int cortex_a_read_buffer(struct target *target, uint32_t address,
2841 uint32_t count, uint8_t *buffer)
2842 {
2843 uint32_t size;
2844
2845 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2846 * will have something to do with the size we leave to it. */
2847 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2848 if (address & size) {
2849 int retval = cortex_a_read_memory_ahb(target, address, size, 1, buffer);
2850 if (retval != ERROR_OK)
2851 return retval;
2852 address += size;
2853 count -= size;
2854 buffer += size;
2855 }
2856 }
2857
2858 /* Read the data with as large access size as possible. */
2859 for (; size > 0; size /= 2) {
2860 uint32_t aligned = count - count % size;
2861 if (aligned > 0) {
2862 int retval = cortex_a_read_memory_ahb(target, address, size, aligned / size, buffer);
2863 if (retval != ERROR_OK)
2864 return retval;
2865 address += aligned;
2866 count -= aligned;
2867 buffer += aligned;
2868 }
2869 }
2870
2871 return ERROR_OK;
2872 }
2873
2874 static int cortex_a_write_buffer(struct target *target, uint32_t address,
2875 uint32_t count, const uint8_t *buffer)
2876 {
2877 uint32_t size;
2878
2879 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2880 * will have something to do with the size we leave to it. */
2881 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2882 if (address & size) {
2883 int retval = cortex_a_write_memory_ahb(target, address, size, 1, buffer);
2884 if (retval != ERROR_OK)
2885 return retval;
2886 address += size;
2887 count -= size;
2888 buffer += size;
2889 }
2890 }
2891
2892 /* Write the data with as large access size as possible. */
2893 for (; size > 0; size /= 2) {
2894 uint32_t aligned = count - count % size;
2895 if (aligned > 0) {
2896 int retval = cortex_a_write_memory_ahb(target, address, size, aligned / size, buffer);
2897 if (retval != ERROR_OK)
2898 return retval;
2899 address += aligned;
2900 count -= aligned;
2901 buffer += aligned;
2902 }
2903 }
2904
2905 return ERROR_OK;
2906 }
2907
/*
 * Timer callback: poll the core's DCC channel and forward any pending
 * target-to-host messages.
 *
 * priv is the struct target this callback was registered for (see
 * cortex_a_init_arch_info).  Returns ERROR_OK, or ERROR_FAIL if the DTR
 * "TX full" flag does not clear within one second.
 */
static int cortex_a_handle_target_request(void *priv)
{
	struct target *target = priv;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	/* Nothing to do before examination or when debug messages are off. */
	if (!target_was_examined(target))
		return ERROR_OK;
	if (!target->dbg_msg_enabled)
		return ERROR_OK;

	if (target->state == TARGET_RUNNING) {
		uint32_t request;
		uint32_t dscr;
		/* DSCR_DTR_TX_FULL set means the core has placed a word in DTRTX. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);

		/* check if we have data */
		int64_t then = timeval_ms();
		while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
			/* Reading DTRTX clears the TX-full flag on the core side. */
			retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
					armv7a->debug_base + CPUDBG_DTRTX, &request);
			if (retval == ERROR_OK) {
				target_request(target, request);
				/* Re-read DSCR to see whether more data is queued. */
				retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
						armv7a->debug_base + CPUDBG_DSCR, &dscr);
			}
			/* NOTE(review): if a mem_ap read fails, the loop exits and
			 * the function still returns ERROR_OK below — read errors
			 * are silently dropped; confirm this is intentional. */
			if (timeval_ms() > then + 1000) {
				LOG_ERROR("Timeout waiting for dtr tx full");
				return ERROR_FAIL;
			}
		}
	}

	return ERROR_OK;
}
2944
2945 /*
2946 * Cortex-A target information and configuration
2947 */
2948
2949 static int cortex_a_examine_first(struct target *target)
2950 {
2951 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2952 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2953 struct adiv5_dap *swjdp = armv7a->arm.dap;
2954 int i;
2955 int retval = ERROR_OK;
2956 uint32_t didr, ctypr, ttypr, cpuid, dbg_osreg;
2957
2958 retval = dap_dp_init(swjdp);
2959 if (retval != ERROR_OK) {
2960 LOG_ERROR("Could not initialize the debug port");
2961 return retval;
2962 }
2963
2964 /* Search for the APB-AB - it is needed for access to debug registers */
2965 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2966 if (retval != ERROR_OK) {
2967 LOG_ERROR("Could not find APB-AP for debug access");
2968 return retval;
2969 }
2970
2971 retval = mem_ap_init(armv7a->debug_ap);
2972 if (retval != ERROR_OK) {
2973 LOG_ERROR("Could not initialize the APB-AP");
2974 return retval;
2975 }
2976
2977 armv7a->debug_ap->memaccess_tck = 80;
2978
2979 /* Search for the AHB-AB.
2980 * REVISIT: We should search for AXI-AP as well and make sure the AP's MEMTYPE says it
2981 * can access system memory. */
2982 armv7a->memory_ap_available = false;
2983 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
2984 if (retval == ERROR_OK) {
2985 retval = mem_ap_init(armv7a->memory_ap);
2986 if (retval == ERROR_OK)
2987 armv7a->memory_ap_available = true;
2988 else
2989 LOG_WARNING("Could not initialize AHB-AP for memory access - using APB-AP");
2990 } else {
2991 /* AHB-AP not found - use APB-AP */
2992 LOG_DEBUG("Could not find AHB-AP - using APB-AP for memory access");
2993 }
2994
2995 if (!target->dbgbase_set) {
2996 uint32_t dbgbase;
2997 /* Get ROM Table base */
2998 uint32_t apid;
2999 int32_t coreidx = target->coreid;
3000 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
3001 target->cmd_name);
3002 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
3003 if (retval != ERROR_OK)
3004 return retval;
3005 /* Lookup 0x15 -- Processor DAP */
3006 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
3007 &armv7a->debug_base, &coreidx);
3008 if (retval != ERROR_OK) {
3009 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
3010 target->cmd_name);
3011 return retval;
3012 }
3013 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
3014 target->coreid, armv7a->debug_base);
3015 } else
3016 armv7a->debug_base = target->dbgbase;
3017
3018 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3019 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
3020 if (retval != ERROR_OK)
3021 return retval;
3022
3023 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3024 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
3025 if (retval != ERROR_OK) {
3026 LOG_DEBUG("Examine %s failed", "CPUID");
3027 return retval;
3028 }
3029
3030 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3031 armv7a->debug_base + CPUDBG_CTYPR, &ctypr);
3032 if (retval != ERROR_OK) {
3033 LOG_DEBUG("Examine %s failed", "CTYPR");
3034 return retval;
3035 }
3036
3037 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3038 armv7a->debug_base + CPUDBG_TTYPR, &ttypr);
3039 if (retval != ERROR_OK) {
3040 LOG_DEBUG("Examine %s failed", "TTYPR");
3041 return retval;
3042 }
3043
3044 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3045 armv7a->debug_base + CPUDBG_DIDR, &didr);
3046 if (retval != ERROR_OK) {
3047 LOG_DEBUG("Examine %s failed", "DIDR");
3048 return retval;
3049 }
3050
3051 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
3052 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
3053 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
3054 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
3055
3056 cortex_a->cpuid = cpuid;
3057 cortex_a->ctypr = ctypr;
3058 cortex_a->ttypr = ttypr;
3059 cortex_a->didr = didr;
3060
3061 /* Unlocking the debug registers */
3062 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
3063 CORTEX_A15_PARTNUM) {
3064
3065 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
3066 armv7a->debug_base + CPUDBG_OSLAR,
3067 0);
3068
3069 if (retval != ERROR_OK)
3070 return retval;
3071
3072 }
3073 /* Unlocking the debug registers */
3074 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
3075 CORTEX_A7_PARTNUM) {
3076
3077 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
3078 armv7a->debug_base + CPUDBG_OSLAR,
3079 0);
3080
3081 if (retval != ERROR_OK)
3082 return retval;
3083
3084 }
3085 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3086 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
3087
3088 if (retval != ERROR_OK)
3089 return retval;
3090
3091 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
3092
3093 armv7a->arm.core_type = ARM_MODE_MON;
3094
3095 /* Avoid recreating the registers cache */
3096 if (!target_was_examined(target)) {
3097 retval = cortex_a_dpm_setup(cortex_a, didr);
3098 if (retval != ERROR_OK)
3099 return retval;
3100 }
3101
3102 /* Setup Breakpoint Register Pairs */
3103 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3104 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3105 cortex_a->brp_num_available = cortex_a->brp_num;
3106 free(cortex_a->brp_list);
3107 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3108 /* cortex_a->brb_enabled = ????; */
3109 for (i = 0; i < cortex_a->brp_num; i++) {
3110 cortex_a->brp_list[i].used = 0;
3111 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3112 cortex_a->brp_list[i].type = BRP_NORMAL;
3113 else
3114 cortex_a->brp_list[i].type = BRP_CONTEXT;
3115 cortex_a->brp_list[i].value = 0;
3116 cortex_a->brp_list[i].control = 0;
3117 cortex_a->brp_list[i].BRPn = i;
3118 }
3119
3120 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3121
3122 /* select debug_ap as default */
3123 swjdp->apsel = armv7a->debug_ap->ap_num;
3124
3125 target_set_examined(target);
3126 return ERROR_OK;
3127 }
3128
3129 static int cortex_a_examine(struct target *target)
3130 {
3131 int retval = ERROR_OK;
3132
3133 /* Reestablish communication after target reset */
3134 retval = cortex_a_examine_first(target);
3135
3136 /* Configure core debug access */
3137 if (retval == ERROR_OK)
3138 retval = cortex_a_init_debug_access(target);
3139
3140 return retval;
3141 }
3142
3143 /*
3144 * Cortex-A target creation and initialization
3145 */
3146
3147 static int cortex_a_init_target(struct command_context *cmd_ctx,
3148 struct target *target)
3149 {
3150 /* examine_first() does a bunch of this */
3151 return ERROR_OK;
3152 }
3153
3154 static int cortex_a_init_arch_info(struct target *target,
3155 struct cortex_a_common *cortex_a, struct jtag_tap *tap)
3156 {
3157 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3158
3159 /* Setup struct cortex_a_common */
3160 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3161
3162 /* tap has no dap initialized */
3163 if (!tap->dap) {
3164 tap->dap = dap_init();
3165
3166 /* Leave (only) generic DAP stuff for debugport_init() */
3167 tap->dap->tap = tap;
3168 }
3169
3170 armv7a->arm.dap = tap->dap;
3171
3172 cortex_a->fast_reg_read = 0;
3173
3174 /* register arch-specific functions */
3175 armv7a->examine_debug_reason = NULL;
3176
3177 armv7a->post_debug_entry = cortex_a_post_debug_entry;
3178
3179 armv7a->pre_restore_context = NULL;
3180
3181 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3182
3183
3184 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
3185
3186 /* REVISIT v7a setup should be in a v7a-specific routine */
3187 armv7a_init_arch_info(target, armv7a);
3188 target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
3189
3190 return ERROR_OK;
3191 }
3192
3193 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3194 {
3195 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3196
3197 cortex_a->armv7a_common.is_armv7r = false;
3198
3199 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3200 }
3201
3202 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3203 {
3204 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3205
3206 cortex_a->armv7a_common.is_armv7r = true;
3207
3208 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3209 }
3210
3211 static void cortex_a_deinit_target(struct target *target)
3212 {
3213 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3214 struct arm_dpm *dpm = &cortex_a->armv7a_common.dpm;
3215
3216 free(cortex_a->brp_list);
3217 free(dpm->dbp);
3218 free(dpm->dwp);
3219 free(cortex_a);
3220 }
3221
3222 static int cortex_a_mmu(struct target *target, int *enabled)
3223 {
3224 struct armv7a_common *armv7a = target_to_armv7a(target);
3225
3226 if (target->state != TARGET_HALTED) {
3227 LOG_ERROR("%s: target not halted", __func__);
3228 return ERROR_TARGET_INVALID;
3229 }
3230
3231 if (armv7a->is_armv7r)
3232 *enabled = 0;
3233 else
3234 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3235
3236 return ERROR_OK;
3237 }
3238
3239 static int cortex_a_virt2phys(struct target *target,
3240 uint32_t virt, uint32_t *phys)
3241 {
3242 int retval = ERROR_FAIL;
3243 struct armv7a_common *armv7a = target_to_armv7a(target);
3244 struct adiv5_dap *swjdp = armv7a->arm.dap;
3245 uint8_t apsel = swjdp->apsel;
3246 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num)) {
3247 uint32_t ret;
3248 retval = armv7a_mmu_translate_va(target,
3249 virt, &ret);
3250 if (retval != ERROR_OK)
3251 goto done;
3252 *phys = ret;
3253 } else {/* use this method if armv7a->memory_ap not selected
3254 * mmu must be enable in order to get a correct translation */
3255 retval = cortex_a_mmu_modify(target, 1);
3256 if (retval != ERROR_OK)
3257 goto done;
3258 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
3259 }
3260 done:
3261 return retval;
3262 }
3263
3264 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3265 {
3266 struct target *target = get_current_target(CMD_CTX);
3267 struct armv7a_common *armv7a = target_to_armv7a(target);
3268
3269 return armv7a_handle_cache_info_command(CMD_CTX,
3270 &armv7a->armv7a_mmu.armv7a_cache);
3271 }
3272
3273
3274 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3275 {
3276 struct target *target = get_current_target(CMD_CTX);
3277 if (!target_was_examined(target)) {
3278 LOG_ERROR("target not examined yet");
3279 return ERROR_FAIL;
3280 }
3281
3282 return cortex_a_init_debug_access(target);
3283 }
3284 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
3285 {
3286 struct target *target = get_current_target(CMD_CTX);
3287 /* check target is an smp target */
3288 struct target_list *head;
3289 struct target *curr;
3290 head = target->head;
3291 target->smp = 0;
3292 if (head != (struct target_list *)NULL) {
3293 while (head != (struct target_list *)NULL) {
3294 curr = head->target;
3295 curr->smp = 0;
3296 head = head->next;
3297 }
3298 /* fixes the target display to the debugger */
3299 target->gdb_service->target = target;
3300 }
3301 return ERROR_OK;
3302 }
3303
3304 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
3305 {
3306 struct target *target = get_current_target(CMD_CTX);
3307 struct target_list *head;
3308 struct target *curr;
3309 head = target->head;
3310 if (head != (struct target_list *)NULL) {
3311 target->smp = 1;
3312 while (head != (struct target_list *)NULL) {
3313 curr = head->target;
3314 curr->smp = 1;
3315 head = head->next;
3316 }
3317 }
3318 return ERROR_OK;
3319 }
3320
/*
 * "cortex_a smp_gdb [coreid]": with an argument, select which core gdb
 * is attached to next; always print the current and pending core ids.
 * Only meaningful on a target that has an SMP list.
 */
COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
{
	struct target *target = get_current_target(CMD_CTX);
	int retval = ERROR_OK;
	struct target_list *head;
	head = target->head;
	if (head != (struct target_list *)NULL) {
		if (CMD_ARGC == 1) {
			int coreid = 0;
			COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
			/* NOTE(review): retval is never assigned after its
			 * initialization; this check looks dead unless
			 * COMMAND_PARSE_NUMBER sets it — confirm against the
			 * macro definition in command.h. */
			if (ERROR_OK != retval)
				return retval;
			/* core[1] is the pending core, applied at next resume. */
			target->gdb_service->core[1] = coreid;

		}
		command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
			, target->gdb_service->core[1]);
	}
	return ERROR_OK;
}
3341
3342 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3343 {
3344 struct target *target = get_current_target(CMD_CTX);
3345 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3346
3347 static const Jim_Nvp nvp_maskisr_modes[] = {
3348 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3349 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3350 { .name = NULL, .value = -1 },
3351 };
3352 const Jim_Nvp *n;
3353
3354 if (CMD_ARGC > 0) {
3355 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3356 if (n->name == NULL) {
3357 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3358 return ERROR_COMMAND_SYNTAX_ERROR;
3359 }
3360
3361 cortex_a->isrmasking_mode = n->value;
3362 }
3363
3364 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3365 command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
3366
3367 return ERROR_OK;
3368 }
3369
3370 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3371 {
3372 struct target *target = get_current_target(CMD_CTX);
3373 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3374
3375 static const Jim_Nvp nvp_dacrfixup_modes[] = {
3376 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3377 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3378 { .name = NULL, .value = -1 },
3379 };
3380 const Jim_Nvp *n;
3381
3382 if (CMD_ARGC > 0) {
3383 n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3384 if (n->name == NULL)
3385 return ERROR_COMMAND_SYNTAX_ERROR;
3386 cortex_a->dacrfixup_mode = n->value;
3387
3388 }
3389
3390 n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3391 command_print(CMD_CTX, "cortex_a domain access control fixup %s", n->name);
3392
3393 return ERROR_OK;
3394 }
3395
/* Subcommands of the "cortex_a" command group. */
static const struct command_registration cortex_a_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{   .name = "smp_off",
	    .handler = cortex_a_handle_smp_off_command,
	    .mode = COMMAND_EXEC,
	    .help = "Stop smp handling",
	    .usage = "",},
	{
		.name = "smp_on",
		.handler = cortex_a_handle_smp_on_command,
		.mode = COMMAND_EXEC,
		.help = "Restart smp handling",
		.usage = "",
	},
	{
		.name = "smp_gdb",
		.handler = cortex_a_handle_smp_gdb_command,
		.mode = COMMAND_EXEC,
		.help = "display/fix current core played to gdb",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask cortex_a interrupts",
		.usage = "['on'|'off']",
	},
	{
		.name = "dacrfixup",
		.handler = handle_cortex_a_dacrfixup_command,
		.mode = COMMAND_EXEC,
		.help = "set domain access control (DACR) to all-manager "
			"on memory access",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for Cortex-A targets: chains the generic
 * ARM and ARMv7-A commands, then adds the "cortex_a" group above. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3464
/* target_type vtable for ARMv7-A (Cortex-A) cores. */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	/* Bulk transfers go through the AHB-AP when available. */
	.read_buffer = cortex_a_read_buffer,
	.write_buffer = cortex_a_write_buffer,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* Watchpoints are not implemented for this target. */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,

	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
3511
/* Subcommands of the "cortex_r4" command group (subset of the Cortex-A
 * set: no SMP or DACR-fixup commands). */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_r4 interrupts",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for Cortex-R4 targets: chains the
 * generic ARM and ARMv7-A commands, then adds the "cortex_r4" group. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3553
/* target_type vtable for ARMv7-R (Cortex-R4) cores.  Unlike the Cortex-A
 * vtable, this one registers no read/write_buffer, phys-memory, mmu or
 * virt2phys hooks — presumably because ARMv7-R has no MMU; confirm. */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* Watchpoints are not implemented for this target. */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)