cortex_a: Update instruction cache after setting a soft breakpoint
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program; if not, write to the *
38 * Free Software Foundation, Inc., *
39 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
40 * *
41 * Cortex-A8(tm) TRM, ARM DDI 0344H *
42 * Cortex-A9(tm) TRM, ARM DDI 0407F *
43 * Cortex-A4(tm) TRM, ARM DDI 0363E *
44 * Cortex-A15(tm)TRM, ARM DDI 0438C *
45 * *
46 ***************************************************************************/
47
48 #ifdef HAVE_CONFIG_H
49 #include "config.h"
50 #endif
51
52 #include "breakpoints.h"
53 #include "cortex_a.h"
54 #include "register.h"
55 #include "target_request.h"
56 #include "target_type.h"
57 #include "arm_opcodes.h"
58 #include <helper/time_support.h>
59
/* Forward declarations for routines referenced before their definitions. */
static int cortex_a_poll(struct target *target);
static int cortex_a_debug_entry(struct target *target);
static int cortex_a_restore_context(struct target *target, bool bpwp);
static int cortex_a_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int cortex_a_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int cortex_a_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum);
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum);
static int cortex_a_mmu(struct target *target, int *enabled);
static int cortex_a_virt2phys(struct target *target,
	uint32_t virt, uint32_t *phys);
static int cortex_a_read_apb_ab_memory(struct target *target,
	uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
81
82 /* restore cp15_control_reg at resume */
83 static int cortex_a_restore_cp15_control_reg(struct target *target)
84 {
85 int retval = ERROR_OK;
86 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
87 struct armv7a_common *armv7a = target_to_armv7a(target);
88
89 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
90 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
91 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
92 retval = armv7a->arm.mcr(target, 15,
93 0, 0, /* op1, op2 */
94 1, 0, /* CRn, CRm */
95 cortex_a->cp15_control_reg);
96 }
97 return retval;
98 }
99
100 /* check address before cortex_a_apb read write access with mmu on
101 * remove apb predictible data abort */
102 static int cortex_a_check_address(struct target *target, uint32_t address)
103 {
104 struct armv7a_common *armv7a = target_to_armv7a(target);
105 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
106 uint32_t os_border = armv7a->armv7a_mmu.os_border;
107 if ((address < os_border) &&
108 (armv7a->arm.core_mode == ARM_MODE_SVC)) {
109 LOG_ERROR("%" PRIx32 " access in userspace and target in supervisor", address);
110 return ERROR_FAIL;
111 }
112 if ((address >= os_border) &&
113 (cortex_a->curr_mode != ARM_MODE_SVC)) {
114 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
115 cortex_a->curr_mode = ARM_MODE_SVC;
116 LOG_INFO("%" PRIx32 " access in kernel space and target not in supervisor",
117 address);
118 return ERROR_OK;
119 }
120 if ((address < os_border) &&
121 (cortex_a->curr_mode == ARM_MODE_SVC)) {
122 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
123 cortex_a->curr_mode = ARM_MODE_ANY;
124 }
125 return ERROR_OK;
126 }
/* modify cp15_control_reg in order to enable or disable mmu for :
 * - virt2phys address conversion
 * - read or write memory in phys or virt address */
static int cortex_a_mmu_modify(struct target *target, int enable)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval = ERROR_OK;
	if (enable) {
		/* if mmu enabled at target stop and mmu not enable */
		if (!(cortex_a->cp15_control_reg & 0x1U)) {
			/* The OS had translation off at halt time, so there are
			 * no translation tables we could safely turn on. */
			LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
			return ERROR_FAIL;
		}
		if (!(cortex_a->cp15_control_reg_curr & 0x1U)) {
			/* SCTLR bit 0 (M) currently clear: set it and write the
			 * register back through CP15. */
			cortex_a->cp15_control_reg_curr |= 0x1U;
			retval = armv7a->arm.mcr(target, 15,
					0, 0,	/* op1, op2 */
					1, 0,	/* CRn, CRm */
					cortex_a->cp15_control_reg_curr);
		}
	} else {
		if ((cortex_a->cp15_control_reg_curr & 0x1U)) {
			if (cortex_a->cp15_control_reg_curr & 0x4U) {
				/* data cache is active */
				cortex_a->cp15_control_reg_curr &= ~0x4U;
				/* flush data cache armv7 function to be called */
				if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache)
					armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache(target);
			}
			/* Clear SCTLR.M to disable address translation. */
			cortex_a->cp15_control_reg_curr &= ~0x1U;
			retval = armv7a->arm.mcr(target, 15,
					0, 0,	/* op1, op2 */
					1, 0,	/* CRn, CRm */
					cortex_a->cp15_control_reg_curr);
		}
	}
	return retval;
}
166
/*
 * Cortex-A Basic debug access, very low level assumes state is saved
 */
static int cortex_a8_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	int retval;

	LOG_DEBUG(" ");

	/* Unlocking the debug registers for modification
	 * The debugport might be uninitialised so try twice.
	 * 0xC5ACCE55 is the architected key for the software lock. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
	if (retval != ERROR_OK) {
		/* try again */
		retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
		if (retval == ERROR_OK)
			LOG_USER(
				"Locking debug access failed on first, but succeeded on second try.");
	}

	return retval;
}
193
/*
 * Cortex-A Basic debug access, very low level assumes state is saved
 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	int retval;
	uint32_t dbg_osreg;
	uint32_t cortex_part_num;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	LOG_DEBUG(" ");
	/* Extract the part-number field from the cached MIDR value. */
	cortex_part_num = (cortex_a->cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >>
		CORTEX_A_MIDR_PARTNUM_SHIFT;

	switch (cortex_part_num) {
	case CORTEX_A7_PARTNUM:
	case CORTEX_A15_PARTNUM:
		/* A7/A15: release the OS Lock (DBGOSLAR) if DBGOSLSR shows it
		 * set, so the debug registers become writable. */
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
						    armv7a->debug_base + CPUDBG_OSLSR,
						    &dbg_osreg);
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("DBGOSLSR 0x%" PRIx32, dbg_osreg);

		if (dbg_osreg & CPUDBG_OSLAR_LK_MASK)
			/* Unlocking the DEBUG OS registers for modification */
			retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
							     armv7a->debug_base + CPUDBG_OSLAR,
							     0);
		break;

	case CORTEX_A5_PARTNUM:
	case CORTEX_A8_PARTNUM:
	case CORTEX_A9_PARTNUM:
	default:
		/* Other parts: unlock via the LOCKACCESS register instead. */
		retval = cortex_a8_init_debug_access(target);
	}

	if (retval != ERROR_OK)
		return retval;
	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
	LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);

	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a_poll(target);
}
253
254 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
255 {
256 /* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
257 * Writes final value of DSCR into *dscr. Pass force to force always
258 * reading DSCR at least once. */
259 struct armv7a_common *armv7a = target_to_armv7a(target);
260 struct adiv5_dap *swjdp = armv7a->arm.dap;
261 long long then = timeval_ms();
262 while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
263 force = false;
264 int retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
265 armv7a->debug_base + CPUDBG_DSCR, dscr);
266 if (retval != ERROR_OK) {
267 LOG_ERROR("Could not read DSCR register");
268 return retval;
269 }
270 if (timeval_ms() > then + 1000) {
271 LOG_ERROR("Timeout waiting for InstrCompl=1");
272 return ERROR_FAIL;
273 }
274 }
275 return ERROR_OK;
276 }
277
278 /* To reduce needless round-trips, pass in a pointer to the current
279 * DSCR value. Initialize it to zero if you just need to know the
280 * value on return from this function; or DSCR_INSTR_COMP if you
281 * happen to know that no instruction is pending.
282 */
283 static int cortex_a_exec_opcode(struct target *target,
284 uint32_t opcode, uint32_t *dscr_p)
285 {
286 uint32_t dscr;
287 int retval;
288 struct armv7a_common *armv7a = target_to_armv7a(target);
289 struct adiv5_dap *swjdp = armv7a->arm.dap;
290
291 dscr = dscr_p ? *dscr_p : 0;
292
293 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
294
295 /* Wait for InstrCompl bit to be set */
296 retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
297 if (retval != ERROR_OK)
298 return retval;
299
300 retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
301 armv7a->debug_base + CPUDBG_ITR, opcode);
302 if (retval != ERROR_OK)
303 return retval;
304
305 long long then = timeval_ms();
306 do {
307 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
308 armv7a->debug_base + CPUDBG_DSCR, &dscr);
309 if (retval != ERROR_OK) {
310 LOG_ERROR("Could not read DSCR register");
311 return retval;
312 }
313 if (timeval_ms() > then + 1000) {
314 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
315 return ERROR_FAIL;
316 }
317 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
318
319 if (dscr_p)
320 *dscr_p = dscr;
321
322 return retval;
323 }
324
/**************************************************************************
Read core register with very few exec_opcode, fast but needs work_area.
This can cause problems with MMU active.
**************************************************************************/
static int cortex_a_read_regs_through_mem(struct target *target, uint32_t address,
	uint32_t *regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/* r0 is reused as the store base pointer below; save it first. */
	retval = cortex_a_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	/* STMIA r0, {r1-r15}: dump the remaining registers to the work area. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;

	/* Read back the 15 stored words (r1..r15) through the memory AP. */
	retval = mem_ap_sel_read_buf(swjdp, armv7a->memory_ap,
			(uint8_t *)(&regfile[1]), 4, 15, address);

	return retval;
}
351
/* Read one core register into *value by moving it through the DCC
 * channel: regnum 0-14 are GPRs, 15 the PC, 16 the CPSR, 17 the SPSR.
 * regnum above 17 is silently ignored (returns ERROR_OK, *value
 * untouched). */
static int cortex_a_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	if (reg > 17)
		return retval;

	if (reg < 15) {
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0"  0xEE00nE15 */
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else if (reg == 15) {
		/* "MOV r0, r15"; then move r0 to DCCTX */
		retval = cortex_a_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* "MRS r0, CPSR" or "MRS r0, SPSR"
		 * (reg & 1 selects SPSR) then move r0 to DCCTX
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
414
/* Write one core register from 'value' through the DCC channel:
 * regnum 0-14 are GPRs, 15 the PC, 16 the CPSR, 17 the SPSR.
 * NOTE: regnum above 17 is silently ignored (returns ERROR_OK), and
 * the Rd > 17 check happens only after DTRRX has been drained. */
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode  0xEE100E15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15) {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	} else if (Rd == 15) {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16) {
			retval = cortex_a_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
493
494 /* Write to memory mapped registers directly with no cache or mmu handling */
495 static int cortex_a_dap_write_memap_register_u32(struct target *target,
496 uint32_t address,
497 uint32_t value)
498 {
499 int retval;
500 struct armv7a_common *armv7a = target_to_armv7a(target);
501 struct adiv5_dap *swjdp = armv7a->arm.dap;
502
503 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, address, value);
504
505 return retval;
506 }
507
508 /*
509 * Cortex-A implementation of Debug Programmer's Model
510 *
511 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
512 * so there's no need to poll for it before executing an instruction.
513 *
514 * NOTE that in several of these cases the "stall" mode might be useful.
515 * It'd let us queue a few operations together... prepare/finish might
516 * be the places to enable/disable that mode.
517 */
518
/* Recover the enclosing cortex_a_common from its embedded arm_dpm:
 * the DPM sits inside armv7a_common, which sits inside cortex_a_common. */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
523
524 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
525 {
526 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
527 return mem_ap_sel_write_u32(a->armv7a_common.arm.dap,
528 a->armv7a_common.debug_ap, a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
529 }
530
531 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
532 uint32_t *dscr_p)
533 {
534 struct adiv5_dap *swjdp = a->armv7a_common.arm.dap;
535 uint32_t dscr = DSCR_INSTR_COMP;
536 int retval;
537
538 if (dscr_p)
539 dscr = *dscr_p;
540
541 /* Wait for DTRRXfull */
542 long long then = timeval_ms();
543 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
544 retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
545 a->armv7a_common.debug_base + CPUDBG_DSCR,
546 &dscr);
547 if (retval != ERROR_OK)
548 return retval;
549 if (timeval_ms() > then + 1000) {
550 LOG_ERROR("Timeout waiting for read dcc");
551 return ERROR_FAIL;
552 }
553 }
554
555 retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
556 a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
557 if (retval != ERROR_OK)
558 return retval;
559 /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
560
561 if (dscr_p)
562 *dscr_p = dscr;
563
564 return retval;
565 }
566
/* DPM "prepare" hook: establish the invariant that DSCR.InstrCompl is
 * set (no instruction pending) and the DCC RX channel is empty before a
 * batch of DPM operations starts. */
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	struct adiv5_dap *swjdp = a->armv7a_common.arm.dap;
	uint32_t dscr;
	int retval;

	/* set up invariant:  INSTR_COMP is set after ever DPM operation */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by draining the stale word into r0 */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
604
/* DPM "finish" hook: currently a no-op counterpart to prepare(). */
static int cortex_a_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
610
611 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
612 uint32_t opcode, uint32_t data)
613 {
614 struct cortex_a_common *a = dpm_to_a(dpm);
615 int retval;
616 uint32_t dscr = DSCR_INSTR_COMP;
617
618 retval = cortex_a_write_dcc(a, data);
619 if (retval != ERROR_OK)
620 return retval;
621
622 return cortex_a_exec_opcode(
623 a->armv7a_common.arm.target,
624 opcode,
625 &dscr);
626 }
627
/* Stage 'data' in DCC, move it into r0, then run 'opcode' which takes
 * its operand from r0.  Clobbers r0. */
static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);

	return retval;
}
655
656 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
657 {
658 struct target *target = dpm->arm->target;
659 uint32_t dscr = DSCR_INSTR_COMP;
660
661 /* "Prefetch flush" after modifying execution status in CPSR */
662 return cortex_a_exec_opcode(target,
663 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
664 &dscr);
665 }
666
667 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
668 uint32_t opcode, uint32_t *data)
669 {
670 struct cortex_a_common *a = dpm_to_a(dpm);
671 int retval;
672 uint32_t dscr = DSCR_INSTR_COMP;
673
674 /* the opcode, writing data to DCC */
675 retval = cortex_a_exec_opcode(
676 a->armv7a_common.arm.target,
677 opcode,
678 &dscr);
679 if (retval != ERROR_OK)
680 return retval;
681
682 return cortex_a_read_dcc(a, data, &dscr);
683 }
684
685
/* Run 'opcode', which leaves its result in r0, then move r0 through
 * DCC into *data.  Clobbers r0. */
static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* the opcode, writing data to R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC: "MCR p14, 0, R0, c0, c5, 0" */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}
711
/* Program one breakpoint (index 0-15) or watchpoint (index 16-31) unit:
 * write the address to its value register and 'control' to its control
 * register. */
static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t vr = a->armv7a_common.debug_base;
	uint32_t cr = a->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
		case 0 ... 15:	/* breakpoints */
			vr += CPUDBG_BVR_BASE;
			cr += CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:	/* watchpoints */
			vr += CPUDBG_WVR_BASE;
			cr += CPUDBG_WCR_BASE;
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	/* Registers are word-indexed within each bank. */
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
747
748 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
749 {
750 struct cortex_a_common *a = dpm_to_a(dpm);
751 uint32_t cr;
752
753 switch (index_t) {
754 case 0 ... 15:
755 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
756 break;
757 case 16 ... 31:
758 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
759 index_t -= 16;
760 break;
761 default:
762 return ERROR_FAIL;
763 }
764 cr += 4 * index_t;
765
766 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
767
768 /* clear control register */
769 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
770 }
771
772 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
773 {
774 struct arm_dpm *dpm = &a->armv7a_common.dpm;
775 int retval;
776
777 dpm->arm = &a->armv7a_common.arm;
778 dpm->didr = didr;
779
780 dpm->prepare = cortex_a_dpm_prepare;
781 dpm->finish = cortex_a_dpm_finish;
782
783 dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
784 dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
785 dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
786
787 dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
788 dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
789
790 dpm->bpwp_enable = cortex_a_bpwp_enable;
791 dpm->bpwp_disable = cortex_a_bpwp_disable;
792
793 retval = arm_dpm_setup(dpm);
794 if (retval == ERROR_OK)
795 retval = arm_dpm_initialize(dpm);
796
797 return retval;
798 }
799 static struct target *get_cortex_a(struct target *target, int32_t coreid)
800 {
801 struct target_list *head;
802 struct target *curr;
803
804 head = target->head;
805 while (head != (struct target_list *)NULL) {
806 curr = head->target;
807 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
808 return curr;
809 head = head->next;
810 }
811 return target;
812 }
813 static int cortex_a_halt(struct target *target);
814
815 static int cortex_a_halt_smp(struct target *target)
816 {
817 int retval = 0;
818 struct target_list *head;
819 struct target *curr;
820 head = target->head;
821 while (head != (struct target_list *)NULL) {
822 curr = head->target;
823 if ((curr != target) && (curr->state != TARGET_HALTED))
824 retval += cortex_a_halt(curr);
825 head = head->next;
826 }
827 return retval;
828 }
829
830 static int update_halt_gdb(struct target *target)
831 {
832 int retval = 0;
833 if (target->gdb_service && target->gdb_service->core[0] == -1) {
834 target->gdb_service->target = target;
835 target->gdb_service->core[0] = target->coreid;
836 retval += cortex_a_halt_smp(target);
837 }
838 return retval;
839 }
840
/*
 * Cortex-A Run control
 */

/* Periodic poll: read DSCR, derive the target state, and on a fresh halt
 * run the debug-entry sequence and notify event callbacks. */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	enum target_state prev_target_state = target->state;
	/*  toggle to another core is done by gdb as follow */
	/*  maint packet J core_id */
	/*  continue */
	/*  the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* Cache the raw DSCR for later inspection (e.g. debug reason). */
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
915
/* Request a halt via DRCR, enable halting debug mode in DSCR, then wait
 * (with a 1 s timeout) for DSCR to report the core halted. */
static int cortex_a_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enter halting debug mode
	 */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_HALTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
963
/* Prepare the core for resume without actually restarting it: fix up the
 * resume PC for the current core state, restore CP15 control and the
 * register context, and mark the target running.
 *
 * current = 1: continue at the current PC (written back to *address);
 * current = 0: continue at *address.  The actual restart is done by
 * cortex_a_internal_restart(). */
static int cortex_a_internal_restore(struct target *target, int current,
	uint32_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM instructions are word-aligned. */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
1057
/* Actually restart a core previously prepared by cortex_a_internal_restore():
 * disable the ITR, issue a restart request through DRCR, and poll DSCR until
 * the core reports it has restarted (1 s timeout).
 */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct adiv5_dap *swjdp = arm->dap;
	int retval;
	uint32_t dscr;
	/*
	 * Restart core and wait for it to be started. Clear ITRen and sticky
	 * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* any previously issued instruction must have completed */
	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* ITR must be disabled before requesting a restart */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	/* request restart and clear any sticky exception flags */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1114
1115 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
1116 {
1117 int retval = 0;
1118 struct target_list *head;
1119 struct target *curr;
1120 uint32_t address;
1121 head = target->head;
1122 while (head != (struct target_list *)NULL) {
1123 curr = head->target;
1124 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1125 /* resume current address , not in step mode */
1126 retval += cortex_a_internal_restore(curr, 1, &address,
1127 handle_breakpoints, 0);
1128 retval += cortex_a_internal_restart(curr);
1129 }
1130 head = head->next;
1131
1132 }
1133 return retval;
1134 }
1135
1136 static int cortex_a_resume(struct target *target, int current,
1137 uint32_t address, int handle_breakpoints, int debug_execution)
1138 {
1139 int retval = 0;
1140 /* dummy resume for smp toggle in order to reduce gdb impact */
1141 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1142 /* simulate a start and halt of target */
1143 target->gdb_service->target = NULL;
1144 target->gdb_service->core[0] = target->gdb_service->core[1];
1145 /* fake resume at next poll we play the target core[1], see poll*/
1146 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1147 return 0;
1148 }
1149 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1150 if (target->smp) {
1151 target->gdb_service->core[0] = -1;
1152 retval = cortex_a_restore_smp(target, handle_breakpoints);
1153 if (retval != ERROR_OK)
1154 return retval;
1155 }
1156 cortex_a_internal_restart(target);
1157
1158 if (!debug_execution) {
1159 target->state = TARGET_RUNNING;
1160 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1161 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1162 } else {
1163 target->state = TARGET_DEBUG_RUNNING;
1164 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1165 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1166 }
1167
1168 return ERROR_OK;
1169 }
1170
/* Common entry path after the core halts into debug state: re-enable the
 * ITR, decode the halt reason from the cached DSCR, read back the register
 * file, fix up the reported PC for the pipeline offset, and finally run the
 * core-specific post_debug_entry hook (reads CP15 state, cache info, ...).
 */
static int cortex_a_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason: sets target->debug_reason from the DSCR
	 * captured at halt time */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load registers accessible through the core debug port */
	if (!regfile_working_area)
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	else {
		/* fast path: dump all registers through a memory working area */
		retval = cortex_a_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
			return retval;

		/* read Current PSR */
		retval = cortex_a_dap_read_coreregister_u32(target, &cpsr, 16);
		/* store current cpsr */
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(arm, cpsr);

		/* update cache: mark r0..pc valid and clean */
		for (i = 0; i <= ARM_PC; i++) {
			reg = arm_reg_current(arm, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: the PC read from the core includes
		 * the pipeline offset (Thumb: +4, ARM: +8) */
		if (cpsr & (1 << 5)) {
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		} else {
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = arm->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1291
1292 static int cortex_a_post_debug_entry(struct target *target)
1293 {
1294 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1295 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1296 int retval;
1297
1298 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1299 retval = armv7a->arm.mrc(target, 15,
1300 0, 0, /* op1, op2 */
1301 1, 0, /* CRn, CRm */
1302 &cortex_a->cp15_control_reg);
1303 if (retval != ERROR_OK)
1304 return retval;
1305 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1306 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1307
1308 if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1309 armv7a_identify_cache(target);
1310
1311 if (armv7a->is_armv7r) {
1312 armv7a->armv7a_mmu.mmu_enabled = 0;
1313 } else {
1314 armv7a->armv7a_mmu.mmu_enabled =
1315 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1316 }
1317 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1318 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1319 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1320 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1321 cortex_a->curr_mode = armv7a->arm.core_mode;
1322
1323 return ERROR_OK;
1324 }
1325
1326 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1327 {
1328 struct armv7a_common *armv7a = target_to_armv7a(target);
1329 struct adiv5_dap *swjdp = armv7a->arm.dap;
1330 uint32_t dscr;
1331
1332 /* Read DSCR */
1333 int retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1334 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1335 if (ERROR_OK != retval)
1336 return retval;
1337
1338 /* clear bitfield */
1339 dscr &= ~bit_mask;
1340 /* put new value */
1341 dscr |= value & bit_mask;
1342
1343 /* write new DSCR */
1344 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1345 armv7a->debug_base + CPUDBG_DSCR, dscr);
1346 return retval;
1347 }
1348
/* Single-step one instruction.  Cortex-A has no native step mode, so this
 * plants a temporary hardware breakpoint with IVA-mismatch matching (halt on
 * the first instruction whose address differs from the step address), resumes,
 * and waits up to 1 s for the halt.  Any user breakpoint at the step address
 * is temporarily removed and re-armed afterwards.
 */
static int cortex_a_step(struct target *target, int current, uint32_t address,
	int handle_breakpoints)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct breakpoint *breakpoint = NULL;
	struct breakpoint stepbreakpoint;
	struct reg *r;
	int retval;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = arm->pc;
	if (!current)
		buf_set_u32(r->value, 0, 32, address);
	else
		address = buf_get_u32(r->value, 0, 32);

	/* The front-end may request us not to handle breakpoints.
	 * But since Cortex-A uses breakpoint for single step,
	 * we MUST handle breakpoints.
	 */
	handle_breakpoints = 1;
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, address);
		if (breakpoint)
			cortex_a_unset_breakpoint(target, breakpoint);
	}

	/* Setup single step breakpoint (stack-allocated, never registered) */
	stepbreakpoint.address = address;
	stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
		? 2 : 4;
	stepbreakpoint.type = BKPT_HARD;
	stepbreakpoint.set = 0;

	/* Disable interrupts during single step if requested */
	if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
		retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
		if (ERROR_OK != retval)
			return retval;
	}

	/* Break on IVA mismatch: matchmode 0x04 halts when the fetched address
	 * does NOT match, i.e. after exactly one instruction executes */
	cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	retval = cortex_a_resume(target, 1, address, 0, 0);
	if (retval != ERROR_OK)
		return retval;

	/* wait (up to 1 s) for the mismatch breakpoint to halt the core */
	long long then = timeval_ms();
	while (target->state != TARGET_HALTED) {
		retval = cortex_a_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("timeout waiting for target halt");
			return ERROR_FAIL;
		}
	}

	cortex_a_unset_breakpoint(target, &stepbreakpoint);

	/* Re-enable interrupts if they were disabled */
	if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
		retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
		if (ERROR_OK != retval)
			return retval;
	}


	target->debug_reason = DBG_REASON_BREAKPOINT;

	/* re-arm the user breakpoint we removed above, if any */
	if (breakpoint)
		cortex_a_set_breakpoint(target, breakpoint, 0);

	/* NOTE(review): this condition looks inverted -- the loop above only
	 * exits when state == TARGET_HALTED, so this debug message can never
	 * fire.  Left as-is pending confirmation of the original intent. */
	if (target->state != TARGET_HALTED)
		LOG_DEBUG("target stepped");

	return ERROR_OK;
}
1437
1438 static int cortex_a_restore_context(struct target *target, bool bpwp)
1439 {
1440 struct armv7a_common *armv7a = target_to_armv7a(target);
1441
1442 LOG_DEBUG(" ");
1443
1444 if (armv7a->pre_restore_context)
1445 armv7a->pre_restore_context(target);
1446
1447 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1448 }
1449
1450 /*
1451 * Cortex-A Breakpoint and watchpoint functions
1452 */
1453
1454 /* Setup hardware Breakpoint Register Pair */
1455 static int cortex_a_set_breakpoint(struct target *target,
1456 struct breakpoint *breakpoint, uint8_t matchmode)
1457 {
1458 int retval;
1459 int brp_i = 0;
1460 uint32_t control;
1461 uint8_t byte_addr_select = 0x0F;
1462 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1463 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1464 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1465
1466 if (breakpoint->set) {
1467 LOG_WARNING("breakpoint already set");
1468 return ERROR_OK;
1469 }
1470
1471 if (breakpoint->type == BKPT_HARD) {
1472 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1473 brp_i++;
1474 if (brp_i >= cortex_a->brp_num) {
1475 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1476 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1477 }
1478 breakpoint->set = brp_i + 1;
1479 if (breakpoint->length == 2)
1480 byte_addr_select = (3 << (breakpoint->address & 0x02));
1481 control = ((matchmode & 0x7) << 20)
1482 | (byte_addr_select << 5)
1483 | (3 << 1) | 1;
1484 brp_list[brp_i].used = 1;
1485 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1486 brp_list[brp_i].control = control;
1487 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1488 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1489 brp_list[brp_i].value);
1490 if (retval != ERROR_OK)
1491 return retval;
1492 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1493 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1494 brp_list[brp_i].control);
1495 if (retval != ERROR_OK)
1496 return retval;
1497 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1498 brp_list[brp_i].control,
1499 brp_list[brp_i].value);
1500 } else if (breakpoint->type == BKPT_SOFT) {
1501 uint8_t code[4];
1502 if (breakpoint->length == 2)
1503 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1504 else
1505 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1506 retval = target_read_memory(target,
1507 breakpoint->address & 0xFFFFFFFE,
1508 breakpoint->length, 1,
1509 breakpoint->orig_instr);
1510 if (retval != ERROR_OK)
1511 return retval;
1512
1513 /* make sure data cache is cleaned & invalidated down to PoC */
1514 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1515 armv7a_cache_flush_virt(target, breakpoint->address,
1516 breakpoint->length);
1517 }
1518
1519 retval = target_write_memory(target,
1520 breakpoint->address & 0xFFFFFFFE,
1521 breakpoint->length, 1, code);
1522 if (retval != ERROR_OK)
1523 return retval;
1524
1525 /* update i-cache at breakpoint location */
1526 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1527 breakpoint->length);
1528 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1529 breakpoint->length);
1530
1531 breakpoint->set = 0x11; /* Any nice value but 0 */
1532 }
1533
1534 return ERROR_OK;
1535 }
1536
1537 static int cortex_a_set_context_breakpoint(struct target *target,
1538 struct breakpoint *breakpoint, uint8_t matchmode)
1539 {
1540 int retval = ERROR_FAIL;
1541 int brp_i = 0;
1542 uint32_t control;
1543 uint8_t byte_addr_select = 0x0F;
1544 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1545 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1546 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1547
1548 if (breakpoint->set) {
1549 LOG_WARNING("breakpoint already set");
1550 return retval;
1551 }
1552 /*check available context BRPs*/
1553 while ((brp_list[brp_i].used ||
1554 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1555 brp_i++;
1556
1557 if (brp_i >= cortex_a->brp_num) {
1558 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1559 return ERROR_FAIL;
1560 }
1561
1562 breakpoint->set = brp_i + 1;
1563 control = ((matchmode & 0x7) << 20)
1564 | (byte_addr_select << 5)
1565 | (3 << 1) | 1;
1566 brp_list[brp_i].used = 1;
1567 brp_list[brp_i].value = (breakpoint->asid);
1568 brp_list[brp_i].control = control;
1569 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1570 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1571 brp_list[brp_i].value);
1572 if (retval != ERROR_OK)
1573 return retval;
1574 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1575 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1576 brp_list[brp_i].control);
1577 if (retval != ERROR_OK)
1578 return retval;
1579 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1580 brp_list[brp_i].control,
1581 brp_list[brp_i].value);
1582 return ERROR_OK;
1583
1584 }
1585
1586 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1587 {
1588 int retval = ERROR_FAIL;
1589 int brp_1 = 0; /* holds the contextID pair */
1590 int brp_2 = 0; /* holds the IVA pair */
1591 uint32_t control_CTX, control_IVA;
1592 uint8_t CTX_byte_addr_select = 0x0F;
1593 uint8_t IVA_byte_addr_select = 0x0F;
1594 uint8_t CTX_machmode = 0x03;
1595 uint8_t IVA_machmode = 0x01;
1596 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1597 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1598 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1599
1600 if (breakpoint->set) {
1601 LOG_WARNING("breakpoint already set");
1602 return retval;
1603 }
1604 /*check available context BRPs*/
1605 while ((brp_list[brp_1].used ||
1606 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1607 brp_1++;
1608
1609 printf("brp(CTX) found num: %d\n", brp_1);
1610 if (brp_1 >= cortex_a->brp_num) {
1611 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1612 return ERROR_FAIL;
1613 }
1614
1615 while ((brp_list[brp_2].used ||
1616 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1617 brp_2++;
1618
1619 printf("brp(IVA) found num: %d\n", brp_2);
1620 if (brp_2 >= cortex_a->brp_num) {
1621 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1622 return ERROR_FAIL;
1623 }
1624
1625 breakpoint->set = brp_1 + 1;
1626 breakpoint->linked_BRP = brp_2;
1627 control_CTX = ((CTX_machmode & 0x7) << 20)
1628 | (brp_2 << 16)
1629 | (0 << 14)
1630 | (CTX_byte_addr_select << 5)
1631 | (3 << 1) | 1;
1632 brp_list[brp_1].used = 1;
1633 brp_list[brp_1].value = (breakpoint->asid);
1634 brp_list[brp_1].control = control_CTX;
1635 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1636 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1637 brp_list[brp_1].value);
1638 if (retval != ERROR_OK)
1639 return retval;
1640 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1641 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1642 brp_list[brp_1].control);
1643 if (retval != ERROR_OK)
1644 return retval;
1645
1646 control_IVA = ((IVA_machmode & 0x7) << 20)
1647 | (brp_1 << 16)
1648 | (IVA_byte_addr_select << 5)
1649 | (3 << 1) | 1;
1650 brp_list[brp_2].used = 1;
1651 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1652 brp_list[brp_2].control = control_IVA;
1653 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1654 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1655 brp_list[brp_2].value);
1656 if (retval != ERROR_OK)
1657 return retval;
1658 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1659 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1660 brp_list[brp_2].control);
1661 if (retval != ERROR_OK)
1662 return retval;
1663
1664 return ERROR_OK;
1665 }
1666
/* Disarm a breakpoint previously armed by one of the cortex_a_set_*
 * functions.  For hardware breakpoints this clears the BRP(s) -- both pairs
 * for a linked (hybrid) breakpoint.  For software breakpoints it writes the
 * saved original instruction back and maintains the caches so the core
 * fetches the restored instruction.
 */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* address != 0 && asid != 0 identifies a hybrid breakpoint,
		 * which occupies two linked BRPs */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			/* now clear the linked IVA pair as well */
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: clear its single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
						breakpoint->length);
		}

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
					breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
					breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1782
1783 static int cortex_a_add_breakpoint(struct target *target,
1784 struct breakpoint *breakpoint)
1785 {
1786 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1787
1788 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1789 LOG_INFO("no hardware breakpoint available");
1790 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1791 }
1792
1793 if (breakpoint->type == BKPT_HARD)
1794 cortex_a->brp_num_available--;
1795
1796 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1797 }
1798
1799 static int cortex_a_add_context_breakpoint(struct target *target,
1800 struct breakpoint *breakpoint)
1801 {
1802 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1803
1804 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1805 LOG_INFO("no hardware breakpoint available");
1806 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1807 }
1808
1809 if (breakpoint->type == BKPT_HARD)
1810 cortex_a->brp_num_available--;
1811
1812 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1813 }
1814
1815 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1816 struct breakpoint *breakpoint)
1817 {
1818 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1819
1820 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1821 LOG_INFO("no hardware breakpoint available");
1822 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1823 }
1824
1825 if (breakpoint->type == BKPT_HARD)
1826 cortex_a->brp_num_available--;
1827
1828 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1829 }
1830
1831
1832 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1833 {
1834 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1835
1836 #if 0
1837 /* It is perfectly possible to remove breakpoints while the target is running */
1838 if (target->state != TARGET_HALTED) {
1839 LOG_WARNING("target not halted");
1840 return ERROR_TARGET_NOT_HALTED;
1841 }
1842 #endif
1843
1844 if (breakpoint->set) {
1845 cortex_a_unset_breakpoint(target, breakpoint);
1846 if (breakpoint->type == BKPT_HARD)
1847 cortex_a->brp_num_available++;
1848 }
1849
1850
1851 return ERROR_OK;
1852 }
1853
1854 /*
1855 * Cortex-A Reset functions
1856 */
1857
/* target_type assert_reset handler: issue a warm reset through whichever
 * mechanism is available -- a user-supplied RESET_ASSERT event handler
 * takes precedence, otherwise SRST is asserted when the adapter supports
 * it.  Fails if neither option exists.
 */
static int cortex_a_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		jtag_add_reset(0, 1);
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	register_cache_invalidate(armv7a->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1886
1887 static int cortex_a_deassert_reset(struct target *target)
1888 {
1889 int retval;
1890
1891 LOG_DEBUG(" ");
1892
1893 /* be certain SRST is off */
1894 jtag_add_reset(0, 0);
1895
1896 retval = cortex_a_poll(target);
1897 if (retval != ERROR_OK)
1898 return retval;
1899
1900 if (target->reset_halt) {
1901 if (target->state != TARGET_HALTED) {
1902 LOG_WARNING("%s: ran after reset and before halt ...",
1903 target_name(target));
1904 retval = target_halt(target);
1905 if (retval != ERROR_OK)
1906 return retval;
1907 }
1908 }
1909
1910 return ERROR_OK;
1911 }
1912
1913 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1914 {
1915 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1916 * New desired mode must be in mode. Current value of DSCR must be in
1917 * *dscr, which is updated with new value.
1918 *
1919 * This function elides actually sending the mode-change over the debug
1920 * interface if the mode is already set as desired.
1921 */
1922 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1923 if (new_dscr != *dscr) {
1924 struct armv7a_common *armv7a = target_to_armv7a(target);
1925 int retval = mem_ap_sel_write_atomic_u32(armv7a->arm.dap,
1926 armv7a->debug_ap, armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1927 if (retval == ERROR_OK)
1928 *dscr = new_dscr;
1929 return retval;
1930 } else {
1931 return ERROR_OK;
1932 }
1933 }
1934
1935 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1936 uint32_t value, uint32_t *dscr)
1937 {
1938 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1939 struct armv7a_common *armv7a = target_to_armv7a(target);
1940 struct adiv5_dap *swjdp = armv7a->arm.dap;
1941 long long then = timeval_ms();
1942 int retval;
1943
1944 while ((*dscr & mask) != value) {
1945 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1946 armv7a->debug_base + CPUDBG_DSCR, dscr);
1947 if (retval != ERROR_OK)
1948 return retval;
1949 if (timeval_ms() > then + 1000) {
1950 LOG_ERROR("timeout waiting for DSCR bit change");
1951 return ERROR_FAIL;
1952 }
1953 }
1954 return ERROR_OK;
1955 }
1956
/* Read a coprocessor register: execute the given MRC opcode (which must
 * target r0), push r0 into DTRTX, wait for TXfull, and fetch the value
 * through the memory AP.  *dscr is the cached DSCR, updated as a side
 * effect.  Clobbers r0 on the target.
 */
static int cortex_a_read_copro(struct target *target, uint32_t opcode,
	uint32_t *data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/* Move from coprocessor to R0. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
	 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
	 * must also check TXfull_l). Most of the time this will be free
	 * because TXfull_l will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX. */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
1991
1992 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
1993 uint32_t *dfsr, uint32_t *dscr)
1994 {
1995 int retval;
1996
1997 if (dfar) {
1998 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
1999 if (retval != ERROR_OK)
2000 return retval;
2001 }
2002
2003 if (dfsr) {
2004 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2005 if (retval != ERROR_OK)
2006 return retval;
2007 }
2008
2009 return ERROR_OK;
2010 }
2011
2012 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
2013 uint32_t data, uint32_t *dscr)
2014 {
2015 int retval;
2016 struct armv7a_common *armv7a = target_to_armv7a(target);
2017 struct adiv5_dap *swjdp = armv7a->arm.dap;
2018
2019 /* Write the value into DTRRX. */
2020 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2021 armv7a->debug_base + CPUDBG_DTRRX, data);
2022 if (retval != ERROR_OK)
2023 return retval;
2024
2025 /* Move from DTRRX to R0. */
2026 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
2027 if (retval != ERROR_OK)
2028 return retval;
2029
2030 /* Move from R0 to coprocessor. */
2031 retval = cortex_a_exec_opcode(target, opcode, dscr);
2032 if (retval != ERROR_OK)
2033 return retval;
2034
2035 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2036 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2037 * check RXfull_l). Most of the time this will be free because RXfull_l
2038 * will be cleared immediately and cached in dscr. */
2039 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2040 if (retval != ERROR_OK)
2041 return retval;
2042
2043 return ERROR_OK;
2044 }
2045
2046 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2047 uint32_t dfsr, uint32_t *dscr)
2048 {
2049 int retval;
2050
2051 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2052 if (retval != ERROR_OK)
2053 return retval;
2054
2055 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2056 if (retval != ERROR_OK)
2057 return retval;
2058
2059 return ERROR_OK;
2060 }
2061
2062 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2063 {
2064 uint32_t status, upper4;
2065
2066 if (dfsr & (1 << 9)) {
2067 /* LPAE format. */
2068 status = dfsr & 0x3f;
2069 upper4 = status >> 2;
2070 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2071 return ERROR_TARGET_TRANSLATION_FAULT;
2072 else if (status == 33)
2073 return ERROR_TARGET_UNALIGNED_ACCESS;
2074 else
2075 return ERROR_TARGET_DATA_ABORT;
2076 } else {
2077 /* Normal format. */
2078 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2079 if (status == 1)
2080 return ERROR_TARGET_UNALIGNED_ACCESS;
2081 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2082 status == 9 || status == 11 || status == 13 || status == 15)
2083 return ERROR_TARGET_TRANSLATION_FAULT;
2084 else
2085 return ERROR_TARGET_DATA_ABORT;
2086 }
2087 }
2088
static int cortex_a_write_apb_ab_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size size from *buffer. Old value of DSCR must
	 * be in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_write_apb_ab_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 * Returns ERROR_OK even when a sticky abort is raised mid-transfer;
	 * the caller inspects *dscr to detect and decode the fault.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Write the value to store into DTRRX. */
		uint32_t data, opcode;
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* Transfer the value from DTRRX to R1. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Write the value transferred to R1 into memory.
		 * NOTE(review): the *_IP opcode variants appear to advance the R0
		 * address as a side effect — the loop never reloads R0; confirm
		 * against the opcode macro definitions. */
		if (size == 1)
			opcode = ARMV4_5_STRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_STRH_IP(1, 0);
		else
			opcode = ARMV4_5_STRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check RXfull_l). Most of the time this will be free
		 * because RXfull_l will be cleared immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2164
2165 static int cortex_a_write_apb_ab_memory_fast(struct target *target,
2166 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2167 {
2168 /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
2169 * in *dscr; updated to new value. This is fast but only works for
2170 * word-sized objects at aligned addresses.
2171 * Preconditions:
2172 * - Address is in R0 and must be a multiple of 4.
2173 * - R0 is marked dirty.
2174 */
2175 struct armv7a_common *armv7a = target_to_armv7a(target);
2176 struct adiv5_dap *swjdp = armv7a->arm.dap;
2177 int retval;
2178
2179 /* Switch to fast mode if not already in that mode. */
2180 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2181 if (retval != ERROR_OK)
2182 return retval;
2183
2184 /* Latch STC instruction. */
2185 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2186 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2187 if (retval != ERROR_OK)
2188 return retval;
2189
2190 /* Transfer all the data and issue all the instructions. */
2191 return mem_ap_sel_write_buf_noincr(swjdp, armv7a->debug_ap, buffer,
2192 4, count, armv7a->debug_base + CPUDBG_DTRRX);
2193 }
2194
2195 static int cortex_a_write_apb_ab_memory(struct target *target,
2196 uint32_t address, uint32_t size,
2197 uint32_t count, const uint8_t *buffer)
2198 {
2199 /* Write memory through APB-AP. */
2200 int retval, final_retval;
2201 struct armv7a_common *armv7a = target_to_armv7a(target);
2202 struct adiv5_dap *swjdp = armv7a->arm.dap;
2203 struct arm *arm = &armv7a->arm;
2204 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2205
2206 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2207 address, size, count);
2208 if (target->state != TARGET_HALTED) {
2209 LOG_WARNING("target not halted");
2210 return ERROR_TARGET_NOT_HALTED;
2211 }
2212
2213 if (!count)
2214 return ERROR_OK;
2215
2216 /* Clear any abort. */
2217 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2218 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2219 if (retval != ERROR_OK)
2220 return retval;
2221
2222 /* Read DSCR. */
2223 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2224 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2225 if (retval != ERROR_OK)
2226 return retval;
2227
2228 /* Switch to non-blocking mode if not already in that mode. */
2229 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2230 if (retval != ERROR_OK)
2231 goto out;
2232
2233 /* Mark R0 as dirty. */
2234 arm_reg_current(arm, 0)->dirty = true;
2235
2236 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2237 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2238 if (retval != ERROR_OK)
2239 goto out;
2240
2241 /* Get the memory address into R0. */
2242 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2243 armv7a->debug_base + CPUDBG_DTRRX, address);
2244 if (retval != ERROR_OK)
2245 goto out;
2246 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2247 if (retval != ERROR_OK)
2248 goto out;
2249
2250 if (size == 4 && (address % 4) == 0) {
2251 /* We are doing a word-aligned transfer, so use fast mode. */
2252 retval = cortex_a_write_apb_ab_memory_fast(target, count, buffer, &dscr);
2253 } else {
2254 /* Use slow path. */
2255 retval = cortex_a_write_apb_ab_memory_slow(target, size, count, buffer, &dscr);
2256 }
2257
2258 out:
2259 final_retval = retval;
2260
2261 /* Switch to non-blocking mode if not already in that mode. */
2262 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2263 if (final_retval == ERROR_OK)
2264 final_retval = retval;
2265
2266 /* Wait for last issued instruction to complete. */
2267 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2268 if (final_retval == ERROR_OK)
2269 final_retval = retval;
2270
2271 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2272 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2273 * check RXfull_l). Most of the time this will be free because RXfull_l
2274 * will be cleared immediately and cached in dscr. However, don’t do this
2275 * if there is fault, because then the instruction might not have completed
2276 * successfully. */
2277 if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2278 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2279 if (retval != ERROR_OK)
2280 return retval;
2281 }
2282
2283 /* If there were any sticky abort flags, clear them. */
2284 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2285 fault_dscr = dscr;
2286 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2287 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2288 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2289 } else {
2290 fault_dscr = 0;
2291 }
2292
2293 /* Handle synchronous data faults. */
2294 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2295 if (final_retval == ERROR_OK) {
2296 /* Final return value will reflect cause of fault. */
2297 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2298 if (retval == ERROR_OK) {
2299 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2300 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2301 } else
2302 final_retval = retval;
2303 }
2304 /* Fault destroyed DFAR/DFSR; restore them. */
2305 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2306 if (retval != ERROR_OK)
2307 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2308 }
2309
2310 /* Handle asynchronous data faults. */
2311 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2312 if (final_retval == ERROR_OK)
2313 /* No other error has been recorded so far, so keep this one. */
2314 final_retval = ERROR_TARGET_DATA_ABORT;
2315 }
2316
2317 /* If the DCC is nonempty, clear it. */
2318 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2319 uint32_t dummy;
2320 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2321 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2322 if (final_retval == ERROR_OK)
2323 final_retval = retval;
2324 }
2325 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2326 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2327 if (final_retval == ERROR_OK)
2328 final_retval = retval;
2329 }
2330
2331 /* Done. */
2332 return final_retval;
2333 }
2334
static int cortex_a_read_apb_ab_memory_slow(struct target *target,
	uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size size into *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_read_apb_ab_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 * Returns ERROR_OK even when a sticky abort is raised mid-transfer;
	 * the caller inspects *dscr to detect and decode the fault.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Issue a load of the appropriate size to R1.
		 * NOTE(review): the *_IP opcode variants appear to advance the R0
		 * address as a side effect — the loop never reloads R0; confirm
		 * against the opcode macro definitions. */
		uint32_t opcode, data;
		if (size == 1)
			opcode = ARMV4_5_LDRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_LDRH_IP(1, 0);
		else
			opcode = ARMV4_5_LDRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Issue a write of R1 to DTRTX. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check TXfull_l). Most of the time this will be free
		 * because TXfull_l will be set immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
				DSCR_DTRTX_FULL_LATCHED, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. */
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &data);
		if (retval != ERROR_OK)
			return retval;
		if (size == 1)
			*buffer = (uint8_t) data;
		else if (size == 2)
			target_buffer_set_u16(target, buffer, (uint16_t) data);
		else
			target_buffer_set_u32(target, buffer, data);

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2411
static int cortex_a_read_apb_ab_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
	 * *dscr; updated to new value. This is fast but only works for word-sized
	 * objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 * Returns ERROR_OK even when a sticky abort is raised mid-transfer;
	 * the caller inspects *dscr to detect and decode the fault.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	uint32_t new_dscr, u32;
	int retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (count > 1) {
		/* Consecutively issue the LDC instruction via a write to ITR and
		 * change to fast mode, in a single bulk copy since DSCR == ITR + 4.
		 * The instruction is issued into the core before the mode switch. */
		uint8_t command[8];
		target_buffer_set_u32(target, command, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
		new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_FAST_MODE;
		target_buffer_set_u32(target, command + 4, new_dscr);
		retval = mem_ap_sel_write_buf(swjdp, armv7a->debug_ap, command, 4, 2,
				armv7a->debug_base + CPUDBG_ITR);
		if (retval != ERROR_OK)
			return retval;
		/* Keep the cached DSCR in sync with what was just written. */
		*dscr = new_dscr;

		/* Read the value transferred to DTRTX into the buffer. Due to fast
		 * mode rules, this blocks until the instruction finishes executing and
		 * then reissues the read instruction to read the next word from
		 * memory. The last read of DTRTX in this call reads the second-to-last
		 * word from memory and issues the read instruction for the last word.
		 */
		retval = mem_ap_sel_read_buf_noincr(swjdp, armv7a->debug_ap, buffer,
				4, count - 1, armv7a->debug_base + CPUDBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += (count - 1) * 4;
	} else {
		/* Issue the LDC instruction via a write to ITR. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Switch to non-blocking mode if not already in that mode.
	 * This also leaves fast mode, so no further LDC is reissued. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, dscr, false);
	if (retval != ERROR_OK)
		return retval;

	/* Check for faults and return early. */
	if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
		return ERROR_OK; /* A data fault is not considered a system failure. */

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check TXfull_l). Most of the time this will be free because TXfull_l
	 * will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX into the buffer. This is the last
	 * word. */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &u32);
	if (retval != ERROR_OK)
		return retval;
	target_buffer_set_u32(target, buffer, u32);

	return ERROR_OK;
}
2499
static int cortex_a_read_apb_ab_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through APB-AP.
	 *
	 * Sets up the DCC in non-blocking mode, loads the source address into
	 * R0, then dispatches to the fast (word-aligned LDC stream) or slow
	 * (per-object) helper.  The cleanup path after "out:" always runs: it
	 * restores non-blocking mode, waits for the last instruction, decodes
	 * and clears any sticky aborts (restoring DFAR/DFSR, which a precise
	 * fault clobbers), and drains a non-empty DCC.  Errors are accumulated
	 * into final_retval so the first one wins. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_apb_ab_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_read_apb_ab_memory_slow(target, size, count, buffer, &dscr);
	}

out:
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		/* Keep a snapshot of the abort bits before clearing them. */
		fault_dscr = dscr;
		mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2627
2628
2629 /*
2630 * Cortex-A Memory access
2631 *
2632 * This is same Cortex M3 but we must also use the correct
2633 * ap number for every access.
2634 */
2635
2636 static int cortex_a_read_phys_memory(struct target *target,
2637 uint32_t address, uint32_t size,
2638 uint32_t count, uint8_t *buffer)
2639 {
2640 struct armv7a_common *armv7a = target_to_armv7a(target);
2641 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2642
2643 LOG_DEBUG("Reading memory at real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32,
2644 address, size, count);
2645
2646 if (count && buffer) {
2647 /* read memory through APB-AP */
2648 if (!armv7a->is_armv7r) {
2649 /* disable mmu */
2650 retval = cortex_a_mmu_modify(target, 0);
2651 if (retval != ERROR_OK)
2652 return retval;
2653 }
2654 retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
2655 }
2656 return retval;
2657 }
2658
2659 static int cortex_a_read_memory(struct target *target, uint32_t address,
2660 uint32_t size, uint32_t count, uint8_t *buffer)
2661 {
2662 int mmu_enabled = 0;
2663 int retval;
2664 struct armv7a_common *armv7a = target_to_armv7a(target);
2665
2666 /* cortex_a handles unaligned memory access */
2667 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2668 size, count);
2669
2670 /* determine if MMU was enabled on target stop */
2671 if (!armv7a->is_armv7r) {
2672 retval = cortex_a_mmu(target, &mmu_enabled);
2673 if (retval != ERROR_OK)
2674 return retval;
2675 }
2676
2677 if (mmu_enabled) {
2678 retval = cortex_a_check_address(target, address);
2679 if (retval != ERROR_OK)
2680 return retval;
2681 /* enable MMU as we could have disabled it for phys access */
2682 retval = cortex_a_mmu_modify(target, 1);
2683 if (retval != ERROR_OK)
2684 return retval;
2685 }
2686 retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
2687
2688 return retval;
2689 }
2690
2691 static int cortex_a_read_memory_ahb(struct target *target, uint32_t address,
2692 uint32_t size, uint32_t count, uint8_t *buffer)
2693 {
2694 int mmu_enabled = 0;
2695 uint32_t virt, phys;
2696 int retval;
2697 struct armv7a_common *armv7a = target_to_armv7a(target);
2698 struct adiv5_dap *swjdp = armv7a->arm.dap;
2699 uint8_t apsel = swjdp->apsel;
2700
2701 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap))
2702 return target_read_memory(target, address, size, count, buffer);
2703
2704 /* cortex_a handles unaligned memory access */
2705 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2706 size, count);
2707
2708 /* determine if MMU was enabled on target stop */
2709 if (!armv7a->is_armv7r) {
2710 retval = cortex_a_mmu(target, &mmu_enabled);
2711 if (retval != ERROR_OK)
2712 return retval;
2713 }
2714
2715 if (mmu_enabled) {
2716 virt = address;
2717 retval = cortex_a_virt2phys(target, virt, &phys);
2718 if (retval != ERROR_OK)
2719 return retval;
2720
2721 LOG_DEBUG("Reading at virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2722 virt, phys);
2723 address = phys;
2724 }
2725
2726 if (!count || !buffer)
2727 return ERROR_COMMAND_SYNTAX_ERROR;
2728
2729 retval = mem_ap_sel_read_buf(swjdp, armv7a->memory_ap, buffer, size, count, address);
2730
2731 return retval;
2732 }
2733
2734 static int cortex_a_write_phys_memory(struct target *target,
2735 uint32_t address, uint32_t size,
2736 uint32_t count, const uint8_t *buffer)
2737 {
2738 struct armv7a_common *armv7a = target_to_armv7a(target);
2739 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2740
2741 LOG_DEBUG("Writing memory to real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2742 size, count);
2743
2744 if (count && buffer) {
2745 /* write memory through APB-AP */
2746 if (!armv7a->is_armv7r) {
2747 retval = cortex_a_mmu_modify(target, 0);
2748 if (retval != ERROR_OK)
2749 return retval;
2750 }
2751 return cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
2752 }
2753
2754 return retval;
2755 }
2756
2757 static int cortex_a_write_memory(struct target *target, uint32_t address,
2758 uint32_t size, uint32_t count, const uint8_t *buffer)
2759 {
2760 int mmu_enabled = 0;
2761 int retval;
2762 struct armv7a_common *armv7a = target_to_armv7a(target);
2763
2764 /* cortex_a handles unaligned memory access */
2765 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2766 size, count);
2767
2768 /* determine if MMU was enabled on target stop */
2769 if (!armv7a->is_armv7r) {
2770 retval = cortex_a_mmu(target, &mmu_enabled);
2771 if (retval != ERROR_OK)
2772 return retval;
2773 }
2774
2775 if (mmu_enabled) {
2776 retval = cortex_a_check_address(target, address);
2777 if (retval != ERROR_OK)
2778 return retval;
2779 /* enable MMU as we could have disabled it for phys access */
2780 retval = cortex_a_mmu_modify(target, 1);
2781 if (retval != ERROR_OK)
2782 return retval;
2783 }
2784
2785 /* memory writes bypass the caches, must flush before writing */
2786 armv7a_cache_auto_flush_on_write(target, address, size * count);
2787
2788 retval = cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
2789
2790 return retval;
2791 }
2792
2793 static int cortex_a_write_memory_ahb(struct target *target, uint32_t address,
2794 uint32_t size, uint32_t count, const uint8_t *buffer)
2795 {
2796 int mmu_enabled = 0;
2797 uint32_t virt, phys;
2798 int retval;
2799 struct armv7a_common *armv7a = target_to_armv7a(target);
2800 struct adiv5_dap *swjdp = armv7a->arm.dap;
2801 uint8_t apsel = swjdp->apsel;
2802
2803 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap))
2804 return target_write_memory(target, address, size, count, buffer);
2805
2806 /* cortex_a handles unaligned memory access */
2807 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2808 size, count);
2809
2810 /* determine if MMU was enabled on target stop */
2811 if (!armv7a->is_armv7r) {
2812 retval = cortex_a_mmu(target, &mmu_enabled);
2813 if (retval != ERROR_OK)
2814 return retval;
2815 }
2816
2817 if (mmu_enabled) {
2818 virt = address;
2819 retval = cortex_a_virt2phys(target, virt, &phys);
2820 if (retval != ERROR_OK)
2821 return retval;
2822
2823 LOG_DEBUG("Writing to virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2824 virt,
2825 phys);
2826 address = phys;
2827 }
2828
2829 if (!count || !buffer)
2830 return ERROR_COMMAND_SYNTAX_ERROR;
2831
2832 retval = mem_ap_sel_write_buf(swjdp, armv7a->memory_ap, buffer, size, count, address);
2833
2834 return retval;
2835 }
2836
2837 static int cortex_a_read_buffer(struct target *target, uint32_t address,
2838 uint32_t count, uint8_t *buffer)
2839 {
2840 uint32_t size;
2841
2842 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2843 * will have something to do with the size we leave to it. */
2844 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2845 if (address & size) {
2846 int retval = cortex_a_read_memory_ahb(target, address, size, 1, buffer);
2847 if (retval != ERROR_OK)
2848 return retval;
2849 address += size;
2850 count -= size;
2851 buffer += size;
2852 }
2853 }
2854
2855 /* Read the data with as large access size as possible. */
2856 for (; size > 0; size /= 2) {
2857 uint32_t aligned = count - count % size;
2858 if (aligned > 0) {
2859 int retval = cortex_a_read_memory_ahb(target, address, size, aligned / size, buffer);
2860 if (retval != ERROR_OK)
2861 return retval;
2862 address += aligned;
2863 count -= aligned;
2864 buffer += aligned;
2865 }
2866 }
2867
2868 return ERROR_OK;
2869 }
2870
2871 static int cortex_a_write_buffer(struct target *target, uint32_t address,
2872 uint32_t count, const uint8_t *buffer)
2873 {
2874 uint32_t size;
2875
2876 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2877 * will have something to do with the size we leave to it. */
2878 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2879 if (address & size) {
2880 int retval = cortex_a_write_memory_ahb(target, address, size, 1, buffer);
2881 if (retval != ERROR_OK)
2882 return retval;
2883 address += size;
2884 count -= size;
2885 buffer += size;
2886 }
2887 }
2888
2889 /* Write the data with as large access size as possible. */
2890 for (; size > 0; size /= 2) {
2891 uint32_t aligned = count - count % size;
2892 if (aligned > 0) {
2893 int retval = cortex_a_write_memory_ahb(target, address, size, aligned / size, buffer);
2894 if (retval != ERROR_OK)
2895 return retval;
2896 address += aligned;
2897 count -= aligned;
2898 buffer += aligned;
2899 }
2900 }
2901
2902 return ERROR_OK;
2903 }
2904
2905 static int cortex_a_handle_target_request(void *priv)
2906 {
2907 struct target *target = priv;
2908 struct armv7a_common *armv7a = target_to_armv7a(target);
2909 struct adiv5_dap *swjdp = armv7a->arm.dap;
2910 int retval;
2911
2912 if (!target_was_examined(target))
2913 return ERROR_OK;
2914 if (!target->dbg_msg_enabled)
2915 return ERROR_OK;
2916
2917 if (target->state == TARGET_RUNNING) {
2918 uint32_t request;
2919 uint32_t dscr;
2920 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2921 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2922
2923 /* check if we have data */
2924 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2925 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2926 armv7a->debug_base + CPUDBG_DTRTX, &request);
2927 if (retval == ERROR_OK) {
2928 target_request(target, request);
2929 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2930 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2931 }
2932 }
2933 }
2934
2935 return ERROR_OK;
2936 }
2937
2938 /*
2939 * Cortex-A target information and configuration
2940 */
2941
2942 static int cortex_a_examine_first(struct target *target)
2943 {
2944 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2945 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2946 struct adiv5_dap *swjdp = armv7a->arm.dap;
2947 int i;
2948 int retval = ERROR_OK;
2949 uint32_t didr, ctypr, ttypr, cpuid, dbg_osreg;
2950
2951 /* We do one extra read to ensure DAP is configured,
2952 * we call ahbap_debugport_init(swjdp) instead
2953 */
2954 retval = ahbap_debugport_init(swjdp);
2955 if (retval != ERROR_OK)
2956 return retval;
2957
2958 /* Search for the APB-AB - it is needed for access to debug registers */
2959 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2960 if (retval != ERROR_OK) {
2961 LOG_ERROR("Could not find APB-AP for debug access");
2962 return retval;
2963 }
2964 /* Search for the AHB-AB */
2965 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
2966 if (retval != ERROR_OK) {
2967 /* AHB-AP not found - use APB-AP */
2968 LOG_DEBUG("Could not find AHB-AP - using APB-AP for memory access");
2969 armv7a->memory_ap_available = false;
2970 } else {
2971 armv7a->memory_ap_available = true;
2972 }
2973
2974
2975 if (!target->dbgbase_set) {
2976 uint32_t dbgbase;
2977 /* Get ROM Table base */
2978 uint32_t apid;
2979 int32_t coreidx = target->coreid;
2980 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2981 target->cmd_name);
2982 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
2983 if (retval != ERROR_OK)
2984 return retval;
2985 /* Lookup 0x15 -- Processor DAP */
2986 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
2987 &armv7a->debug_base, &coreidx);
2988 if (retval != ERROR_OK) {
2989 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2990 target->cmd_name);
2991 return retval;
2992 }
2993 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2994 coreidx, armv7a->debug_base);
2995 } else
2996 armv7a->debug_base = target->dbgbase;
2997
2998 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2999 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
3000 if (retval != ERROR_OK)
3001 return retval;
3002
3003 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
3004 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
3005 if (retval != ERROR_OK) {
3006 LOG_DEBUG("Examine %s failed", "CPUID");
3007 return retval;
3008 }
3009
3010 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
3011 armv7a->debug_base + CPUDBG_CTYPR, &ctypr);
3012 if (retval != ERROR_OK) {
3013 LOG_DEBUG("Examine %s failed", "CTYPR");
3014 return retval;
3015 }
3016
3017 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
3018 armv7a->debug_base + CPUDBG_TTYPR, &ttypr);
3019 if (retval != ERROR_OK) {
3020 LOG_DEBUG("Examine %s failed", "TTYPR");
3021 return retval;
3022 }
3023
3024 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
3025 armv7a->debug_base + CPUDBG_DIDR, &didr);
3026 if (retval != ERROR_OK) {
3027 LOG_DEBUG("Examine %s failed", "DIDR");
3028 return retval;
3029 }
3030
3031 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
3032 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
3033 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
3034 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
3035
3036 cortex_a->cpuid = cpuid;
3037 cortex_a->ctypr = ctypr;
3038 cortex_a->ttypr = ttypr;
3039 cortex_a->didr = didr;
3040
3041 /* Unlocking the debug registers */
3042 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
3043 CORTEX_A15_PARTNUM) {
3044
3045 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
3046 armv7a->debug_base + CPUDBG_OSLAR,
3047 0);
3048
3049 if (retval != ERROR_OK)
3050 return retval;
3051
3052 }
3053 /* Unlocking the debug registers */
3054 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
3055 CORTEX_A7_PARTNUM) {
3056
3057 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
3058 armv7a->debug_base + CPUDBG_OSLAR,
3059 0);
3060
3061 if (retval != ERROR_OK)
3062 return retval;
3063
3064 }
3065 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
3066 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
3067
3068 if (retval != ERROR_OK)
3069 return retval;
3070
3071 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
3072
3073 armv7a->arm.core_type = ARM_MODE_MON;
3074
3075 /* Avoid recreating the registers cache */
3076 if (!target_was_examined(target)) {
3077 retval = cortex_a_dpm_setup(cortex_a, didr);
3078 if (retval != ERROR_OK)
3079 return retval;
3080 }
3081
3082 /* Setup Breakpoint Register Pairs */
3083 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3084 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3085 cortex_a->brp_num_available = cortex_a->brp_num;
3086 free(cortex_a->brp_list);
3087 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3088 /* cortex_a->brb_enabled = ????; */
3089 for (i = 0; i < cortex_a->brp_num; i++) {
3090 cortex_a->brp_list[i].used = 0;
3091 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3092 cortex_a->brp_list[i].type = BRP_NORMAL;
3093 else
3094 cortex_a->brp_list[i].type = BRP_CONTEXT;
3095 cortex_a->brp_list[i].value = 0;
3096 cortex_a->brp_list[i].control = 0;
3097 cortex_a->brp_list[i].BRPn = i;
3098 }
3099
3100 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3101
3102 target_set_examined(target);
3103 return ERROR_OK;
3104 }
3105
3106 static int cortex_a_examine(struct target *target)
3107 {
3108 int retval = ERROR_OK;
3109
3110 /* Reestablish communication after target reset */
3111 retval = cortex_a_examine_first(target);
3112
3113 /* Configure core debug access */
3114 if (retval == ERROR_OK)
3115 retval = cortex_a_init_debug_access(target);
3116
3117 return retval;
3118 }
3119
3120 /*
3121 * Cortex-A target creation and initialization
3122 */
3123
3124 static int cortex_a_init_target(struct command_context *cmd_ctx,
3125 struct target *target)
3126 {
3127 /* examine_first() does a bunch of this */
3128 return ERROR_OK;
3129 }
3130
3131 static int cortex_a_init_arch_info(struct target *target,
3132 struct cortex_a_common *cortex_a, struct jtag_tap *tap)
3133 {
3134 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3135 struct adiv5_dap *dap = &armv7a->dap;
3136
3137 armv7a->arm.dap = dap;
3138
3139 /* Setup struct cortex_a_common */
3140 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3141 /* tap has no dap initialized */
3142 if (!tap->dap) {
3143 armv7a->arm.dap = dap;
3144 /* Setup struct cortex_a_common */
3145
3146 /* prepare JTAG information for the new target */
3147 cortex_a->jtag_info.tap = tap;
3148 cortex_a->jtag_info.scann_size = 4;
3149
3150 /* Leave (only) generic DAP stuff for debugport_init() */
3151 dap->jtag_info = &cortex_a->jtag_info;
3152
3153 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
3154 dap->tar_autoincr_block = (1 << 10);
3155 dap->memaccess_tck = 80;
3156 tap->dap = dap;
3157 } else
3158 armv7a->arm.dap = tap->dap;
3159
3160 cortex_a->fast_reg_read = 0;
3161
3162 /* register arch-specific functions */
3163 armv7a->examine_debug_reason = NULL;
3164
3165 armv7a->post_debug_entry = cortex_a_post_debug_entry;
3166
3167 armv7a->pre_restore_context = NULL;
3168
3169 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3170
3171
3172 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
3173
3174 /* REVISIT v7a setup should be in a v7a-specific routine */
3175 armv7a_init_arch_info(target, armv7a);
3176 target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
3177
3178 return ERROR_OK;
3179 }
3180
3181 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3182 {
3183 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3184
3185 cortex_a->armv7a_common.is_armv7r = false;
3186
3187 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3188 }
3189
3190 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3191 {
3192 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3193
3194 cortex_a->armv7a_common.is_armv7r = true;
3195
3196 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3197 }
3198
3199 static void cortex_a_deinit_target(struct target *target)
3200 {
3201 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3202 struct arm_dpm *dpm = &cortex_a->armv7a_common.dpm;
3203
3204 free(cortex_a->brp_list);
3205 free(dpm->dbp);
3206 free(dpm->dwp);
3207 free(cortex_a);
3208 }
3209
3210 static int cortex_a_mmu(struct target *target, int *enabled)
3211 {
3212 if (target->state != TARGET_HALTED) {
3213 LOG_ERROR("%s: target not halted", __func__);
3214 return ERROR_TARGET_INVALID;
3215 }
3216
3217 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3218 return ERROR_OK;
3219 }
3220
3221 static int cortex_a_virt2phys(struct target *target,
3222 uint32_t virt, uint32_t *phys)
3223 {
3224 int retval = ERROR_FAIL;
3225 struct armv7a_common *armv7a = target_to_armv7a(target);
3226 struct adiv5_dap *swjdp = armv7a->arm.dap;
3227 uint8_t apsel = swjdp->apsel;
3228 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
3229 uint32_t ret;
3230 retval = armv7a_mmu_translate_va(target,
3231 virt, &ret);
3232 if (retval != ERROR_OK)
3233 goto done;
3234 *phys = ret;
3235 } else {/* use this method if armv7a->memory_ap not selected
3236 * mmu must be enable in order to get a correct translation */
3237 retval = cortex_a_mmu_modify(target, 1);
3238 if (retval != ERROR_OK)
3239 goto done;
3240 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
3241 }
3242 done:
3243 return retval;
3244 }
3245
3246 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3247 {
3248 struct target *target = get_current_target(CMD_CTX);
3249 struct armv7a_common *armv7a = target_to_armv7a(target);
3250
3251 return armv7a_handle_cache_info_command(CMD_CTX,
3252 &armv7a->armv7a_mmu.armv7a_cache);
3253 }
3254
3255
3256 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3257 {
3258 struct target *target = get_current_target(CMD_CTX);
3259 if (!target_was_examined(target)) {
3260 LOG_ERROR("target not examined yet");
3261 return ERROR_FAIL;
3262 }
3263
3264 return cortex_a_init_debug_access(target);
3265 }
3266 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
3267 {
3268 struct target *target = get_current_target(CMD_CTX);
3269 /* check target is an smp target */
3270 struct target_list *head;
3271 struct target *curr;
3272 head = target->head;
3273 target->smp = 0;
3274 if (head != (struct target_list *)NULL) {
3275 while (head != (struct target_list *)NULL) {
3276 curr = head->target;
3277 curr->smp = 0;
3278 head = head->next;
3279 }
3280 /* fixes the target display to the debugger */
3281 target->gdb_service->target = target;
3282 }
3283 return ERROR_OK;
3284 }
3285
3286 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
3287 {
3288 struct target *target = get_current_target(CMD_CTX);
3289 struct target_list *head;
3290 struct target *curr;
3291 head = target->head;
3292 if (head != (struct target_list *)NULL) {
3293 target->smp = 1;
3294 while (head != (struct target_list *)NULL) {
3295 curr = head->target;
3296 curr->smp = 1;
3297 head = head->next;
3298 }
3299 }
3300 return ERROR_OK;
3301 }
3302
3303 COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
3304 {
3305 struct target *target = get_current_target(CMD_CTX);
3306 int retval = ERROR_OK;
3307 struct target_list *head;
3308 head = target->head;
3309 if (head != (struct target_list *)NULL) {
3310 if (CMD_ARGC == 1) {
3311 int coreid = 0;
3312 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
3313 if (ERROR_OK != retval)
3314 return retval;
3315 target->gdb_service->core[1] = coreid;
3316
3317 }
3318 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
3319 , target->gdb_service->core[1]);
3320 }
3321 return ERROR_OK;
3322 }
3323
3324 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3325 {
3326 struct target *target = get_current_target(CMD_CTX);
3327 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3328
3329 static const Jim_Nvp nvp_maskisr_modes[] = {
3330 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3331 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3332 { .name = NULL, .value = -1 },
3333 };
3334 const Jim_Nvp *n;
3335
3336 if (target->state != TARGET_HALTED) {
3337 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3338 return ERROR_OK;
3339 }
3340
3341 if (CMD_ARGC > 0) {
3342 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3343 if (n->name == NULL)
3344 return ERROR_COMMAND_SYNTAX_ERROR;
3345 cortex_a->isrmasking_mode = n->value;
3346
3347 }
3348
3349 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3350 command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
3351
3352 return ERROR_OK;
3353 }
3354
3355 static const struct command_registration cortex_a_exec_command_handlers[] = {
3356 {
3357 .name = "cache_info",
3358 .handler = cortex_a_handle_cache_info_command,
3359 .mode = COMMAND_EXEC,
3360 .help = "display information about target caches",
3361 .usage = "",
3362 },
3363 {
3364 .name = "dbginit",
3365 .handler = cortex_a_handle_dbginit_command,
3366 .mode = COMMAND_EXEC,
3367 .help = "Initialize core debug",
3368 .usage = "",
3369 },
3370 { .name = "smp_off",
3371 .handler = cortex_a_handle_smp_off_command,
3372 .mode = COMMAND_EXEC,
3373 .help = "Stop smp handling",
3374 .usage = "",},
3375 {
3376 .name = "smp_on",
3377 .handler = cortex_a_handle_smp_on_command,
3378 .mode = COMMAND_EXEC,
3379 .help = "Restart smp handling",
3380 .usage = "",
3381 },
3382 {
3383 .name = "smp_gdb",
3384 .handler = cortex_a_handle_smp_gdb_command,
3385 .mode = COMMAND_EXEC,
3386 .help = "display/fix current core played to gdb",
3387 .usage = "",
3388 },
3389 {
3390 .name = "maskisr",
3391 .handler = handle_cortex_a_mask_interrupts_command,
3392 .mode = COMMAND_EXEC,
3393 .help = "mask cortex_a interrupts",
3394 .usage = "['on'|'off']",
3395 },
3396
3397
3398 COMMAND_REGISTRATION_DONE
3399 };
/* Top-level command registration for the "cortex_a" target: chains the
 * generic ARM and ARMv7-A command groups plus the Cortex-A specific
 * handlers defined above. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3416
/* Target method table for ARMv7-A cores ("cortex_a"; "cortex_a8" kept
 * as a deprecated alias). */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	/* Bulk transfers go through the alignment-aware buffer helpers. */
	.read_buffer = cortex_a_read_buffer,
	.write_buffer = cortex_a_write_buffer,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* Watchpoint support is not implemented for this target. */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,

	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
3463
/* Cortex-R4 specific EXEC-mode commands; a subset of the Cortex-A set
 * (no SMP commands are registered here). */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_r4 interrupts",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for the "cortex_r4" target: shares the
 * generic ARM and ARMv7-A groups with the Cortex-A target. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3505
/* Target method table for the Cortex-R4 (ARMv7-R). Shares most handlers
 * with the Cortex-A table above, but registers no read/write_buffer,
 * physical-memory, mmu or virt2phys callbacks. */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* Watchpoint support is not implemented for this target. */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)