target/arm: Remove usage of struct arm_jtag in ARMv7 targets
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program; if not, write to the *
38 * Free Software Foundation, Inc., *
39 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
40 * *
41 * Cortex-A8(tm) TRM, ARM DDI 0344H *
42 * Cortex-A9(tm) TRM, ARM DDI 0407F *
43 * Cortex-R4(tm) TRM, ARM DDI 0363E *
44 * Cortex-A15(tm)TRM, ARM DDI 0438C *
45 * *
46 ***************************************************************************/
47
48 #ifdef HAVE_CONFIG_H
49 #include "config.h"
50 #endif
51
52 #include "breakpoints.h"
53 #include "cortex_a.h"
54 #include "register.h"
55 #include "target_request.h"
56 #include "target_type.h"
57 #include "arm_opcodes.h"
58 #include <helper/time_support.h>
59
60 static int cortex_a_poll(struct target *target);
61 static int cortex_a_debug_entry(struct target *target);
62 static int cortex_a_restore_context(struct target *target, bool bpwp);
63 static int cortex_a_set_breakpoint(struct target *target,
64 struct breakpoint *breakpoint, uint8_t matchmode);
65 static int cortex_a_set_context_breakpoint(struct target *target,
66 struct breakpoint *breakpoint, uint8_t matchmode);
67 static int cortex_a_set_hybrid_breakpoint(struct target *target,
68 struct breakpoint *breakpoint);
69 static int cortex_a_unset_breakpoint(struct target *target,
70 struct breakpoint *breakpoint);
71 static int cortex_a_dap_read_coreregister_u32(struct target *target,
72 uint32_t *value, int regnum);
73 static int cortex_a_dap_write_coreregister_u32(struct target *target,
74 uint32_t value, int regnum);
75 static int cortex_a_mmu(struct target *target, int *enabled);
76 static int cortex_a_mmu_modify(struct target *target, int enable);
77 static int cortex_a_virt2phys(struct target *target,
78 uint32_t virt, uint32_t *phys);
79 static int cortex_a_read_apb_ab_memory(struct target *target,
80 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
81
82
83 /* restore cp15_control_reg at resume */
84 static int cortex_a_restore_cp15_control_reg(struct target *target)
85 {
86 int retval = ERROR_OK;
87 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
88 struct armv7a_common *armv7a = target_to_armv7a(target);
89
90 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
91 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
92 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
93 retval = armv7a->arm.mcr(target, 15,
94 0, 0, /* op1, op2 */
95 1, 0, /* CRn, CRm */
96 cortex_a->cp15_control_reg);
97 }
98 return retval;
99 }
100
101 /*
102 * Set up ARM core for memory access.
103 * If !phys_access, switch to SVC mode and make sure MMU is on
104 * If phys_access, switch off mmu
105 */
106 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
107 {
108 struct armv7a_common *armv7a = target_to_armv7a(target);
109 int mmu_enabled = 0;
110
111 if (phys_access == 0) {
112 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
113 cortex_a_mmu(target, &mmu_enabled);
114 if (mmu_enabled)
115 cortex_a_mmu_modify(target, 1);
116 } else {
117 cortex_a_mmu(target, &mmu_enabled);
118 if (mmu_enabled)
119 cortex_a_mmu_modify(target, 0);
120 }
121 return ERROR_OK;
122 }
123
124 /*
125 * Restore ARM core after memory access.
126 * If !phys_access, switch to previous mode
127 * If phys_access, restore MMU setting
128 */
129 static int cortex_a_post_memaccess(struct target *target, int phys_access)
130 {
131 struct armv7a_common *armv7a = target_to_armv7a(target);
132
133 if (phys_access == 0) {
134 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
135 } else {
136 int mmu_enabled = 0;
137 cortex_a_mmu(target, &mmu_enabled);
138 if (mmu_enabled)
139 cortex_a_mmu_modify(target, 1);
140 }
141 return ERROR_OK;
142 }
143
144
145 /* modify cp15_control_reg in order to enable or disable mmu for :
146 * - virt2phys address conversion
147 * - read or write memory in phys or virt address */
148 static int cortex_a_mmu_modify(struct target *target, int enable)
149 {
150 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
151 struct armv7a_common *armv7a = target_to_armv7a(target);
152 int retval = ERROR_OK;
153 int need_write = 0;
154
155 if (enable) {
156 /* if mmu enabled at target stop and mmu not enable */
157 if (!(cortex_a->cp15_control_reg & 0x1U)) {
158 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
159 return ERROR_FAIL;
160 }
161 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
162 cortex_a->cp15_control_reg_curr |= 0x1U;
163 need_write = 1;
164 }
165 } else {
166 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
167 cortex_a->cp15_control_reg_curr &= ~0x1U;
168 need_write = 1;
169 }
170 }
171
172 if (need_write) {
173 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
174 enable ? "enable mmu" : "disable mmu",
175 cortex_a->cp15_control_reg_curr);
176
177 retval = armv7a->arm.mcr(target, 15,
178 0, 0, /* op1, op2 */
179 1, 0, /* CRn, CRm */
180 cortex_a->cp15_control_reg_curr);
181 }
182 return retval;
183 }
184
185 /*
186 * Cortex-A Basic debug access, very low level assumes state is saved
187 */
188 static int cortex_a8_init_debug_access(struct target *target)
189 {
190 struct armv7a_common *armv7a = target_to_armv7a(target);
191 struct adiv5_dap *swjdp = armv7a->arm.dap;
192 int retval;
193
194 LOG_DEBUG(" ");
195
196 /* Unlocking the debug registers for modification
197 * The debugport might be uninitialised so try twice */
198 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
199 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
200 if (retval != ERROR_OK) {
201 /* try again */
202 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
203 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
204 if (retval == ERROR_OK)
205 LOG_USER(
206 "Locking debug access failed on first, but succeeded on second try.");
207 }
208
209 return retval;
210 }
211
212 /*
213 * Cortex-A Basic debug access, very low level assumes state is saved
214 */
215 static int cortex_a_init_debug_access(struct target *target)
216 {
217 struct armv7a_common *armv7a = target_to_armv7a(target);
218 struct adiv5_dap *swjdp = armv7a->arm.dap;
219 int retval;
220 uint32_t dbg_osreg;
221 uint32_t cortex_part_num;
222 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
223
224 LOG_DEBUG(" ");
225 cortex_part_num = (cortex_a->cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >>
226 CORTEX_A_MIDR_PARTNUM_SHIFT;
227
228 switch (cortex_part_num) {
229 case CORTEX_A7_PARTNUM:
230 case CORTEX_A15_PARTNUM:
231 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
232 armv7a->debug_base + CPUDBG_OSLSR,
233 &dbg_osreg);
234 if (retval != ERROR_OK)
235 return retval;
236
237 LOG_DEBUG("DBGOSLSR 0x%" PRIx32, dbg_osreg);
238
239 if (dbg_osreg & CPUDBG_OSLAR_LK_MASK)
240 /* Unlocking the DEBUG OS registers for modification */
241 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
242 armv7a->debug_base + CPUDBG_OSLAR,
243 0);
244 break;
245
246 case CORTEX_A5_PARTNUM:
247 case CORTEX_A8_PARTNUM:
248 case CORTEX_A9_PARTNUM:
249 default:
250 retval = cortex_a8_init_debug_access(target);
251 }
252
253 if (retval != ERROR_OK)
254 return retval;
255 /* Clear Sticky Power Down status Bit in PRSR to enable access to
256 the registers in the Core Power Domain */
257 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
258 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
259 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
260
261 if (retval != ERROR_OK)
262 return retval;
263
264 /* Disable cacheline fills and force cache write-through in debug state */
265 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
266 armv7a->debug_base + CPUDBG_DSCCR, 0);
267 if (retval != ERROR_OK)
268 return retval;
269
270 /* Disable TLB lookup and refill/eviction in debug state */
271 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
272 armv7a->debug_base + CPUDBG_DSMCR, 0);
273 if (retval != ERROR_OK)
274 return retval;
275
276 /* Enabling of instruction execution in debug mode is done in debug_entry code */
277
278 /* Resync breakpoint registers */
279
280 /* Since this is likely called from init or reset, update target state information*/
281 return cortex_a_poll(target);
282 }
283
284 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
285 {
286 /* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
287 * Writes final value of DSCR into *dscr. Pass force to force always
288 * reading DSCR at least once. */
289 struct armv7a_common *armv7a = target_to_armv7a(target);
290 struct adiv5_dap *swjdp = armv7a->arm.dap;
291 long long then = timeval_ms();
292 while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
293 force = false;
294 int retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
295 armv7a->debug_base + CPUDBG_DSCR, dscr);
296 if (retval != ERROR_OK) {
297 LOG_ERROR("Could not read DSCR register");
298 return retval;
299 }
300 if (timeval_ms() > then + 1000) {
301 LOG_ERROR("Timeout waiting for InstrCompl=1");
302 return ERROR_FAIL;
303 }
304 }
305 return ERROR_OK;
306 }
307
308 /* To reduce needless round-trips, pass in a pointer to the current
309 * DSCR value. Initialize it to zero if you just need to know the
310 * value on return from this function; or DSCR_INSTR_COMP if you
311 * happen to know that no instruction is pending.
312 */
313 static int cortex_a_exec_opcode(struct target *target,
314 uint32_t opcode, uint32_t *dscr_p)
315 {
316 uint32_t dscr;
317 int retval;
318 struct armv7a_common *armv7a = target_to_armv7a(target);
319 struct adiv5_dap *swjdp = armv7a->arm.dap;
320
321 dscr = dscr_p ? *dscr_p : 0;
322
323 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
324
325 /* Wait for InstrCompl bit to be set */
326 retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
327 if (retval != ERROR_OK)
328 return retval;
329
330 retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
331 armv7a->debug_base + CPUDBG_ITR, opcode);
332 if (retval != ERROR_OK)
333 return retval;
334
335 long long then = timeval_ms();
336 do {
337 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
338 armv7a->debug_base + CPUDBG_DSCR, &dscr);
339 if (retval != ERROR_OK) {
340 LOG_ERROR("Could not read DSCR register");
341 return retval;
342 }
343 if (timeval_ms() > then + 1000) {
344 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
345 return ERROR_FAIL;
346 }
347 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
348
349 if (dscr_p)
350 *dscr_p = dscr;
351
352 return retval;
353 }
354
355 /**************************************************************************
356 Read core register with very few exec_opcode, fast but needs work_area.
357 This can cause problems with MMU active.
358 **************************************************************************/
359 static int cortex_a_read_regs_through_mem(struct target *target, uint32_t address,
360 uint32_t *regfile)
361 {
362 int retval = ERROR_OK;
363 struct armv7a_common *armv7a = target_to_armv7a(target);
364 struct adiv5_dap *swjdp = armv7a->arm.dap;
365
366 retval = cortex_a_dap_read_coreregister_u32(target, regfile, 0);
367 if (retval != ERROR_OK)
368 return retval;
369 retval = cortex_a_dap_write_coreregister_u32(target, address, 0);
370 if (retval != ERROR_OK)
371 return retval;
372 retval = cortex_a_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
373 if (retval != ERROR_OK)
374 return retval;
375
376 retval = mem_ap_sel_read_buf(swjdp, armv7a->memory_ap,
377 (uint8_t *)(&regfile[1]), 4, 15, address);
378
379 return retval;
380 }
381
/* Read one core register through the DCC: make the core execute an
 * instruction moving the register into DTRTX, then collect the word over
 * the debug AP.
 *
 *   regnum 0..14 -> R0..R14
 *   regnum 15    -> PC (via R0, which is clobbered)
 *   regnum 16/17 -> CPSR/SPSR (via R0, which is clobbered)
 *
 * Returns ERROR_OK without touching *value for regnum > 17. */
static int cortex_a_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	if (reg > 17)
		return retval;

	if (reg < 15) {
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else if (reg == 15) {
		/* "MOV r0, r15"; then move r0 to DCCTX */
		retval = cortex_a_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* "MRS r0, CPSR" or "MRS r0, SPSR"
		 * then move r0 to DCCTX
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			/* NOTE(review): message mentions exec_opcode, but this
			 * loop is actually waiting for DTRTX to fill. */
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
444
/* Write one core register through the DCC: push the word over the debug AP
 * into DTRRX, then have the core execute an instruction that pulls it into
 * the destination register.
 *
 *   regnum 0..14 -> R0..R14
 *   regnum 15    -> PC (via R0, which is clobbered)
 *   regnum 16/17 -> CPSR/SPSR (via R0, which is clobbered)
 *
 * Silently returns ERROR_OK for regnum > 17 (after draining a stale DCC). */
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15) {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	} else if (Rd == 15) {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" (ISB) after modifying execution status in CPSR */
		if (Rd == 16) {
			retval = cortex_a_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
523
524 /* Write to memory mapped registers directly with no cache or mmu handling */
525 static int cortex_a_dap_write_memap_register_u32(struct target *target,
526 uint32_t address,
527 uint32_t value)
528 {
529 int retval;
530 struct armv7a_common *armv7a = target_to_armv7a(target);
531 struct adiv5_dap *swjdp = armv7a->arm.dap;
532
533 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, address, value);
534
535 return retval;
536 }
537
538 /*
539 * Cortex-A implementation of Debug Programmer's Model
540 *
541 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
542 * so there's no need to poll for it before executing an instruction.
543 *
544 * NOTE that in several of these cases the "stall" mode might be useful.
545 * It'd let us queue a few operations together... prepare/finish might
546 * be the places to enable/disable that mode.
547 */
548
/* Map an embedded arm_dpm pointer back to its enclosing cortex_a_common. */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
553
554 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
555 {
556 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
557 return mem_ap_sel_write_u32(a->armv7a_common.arm.dap,
558 a->armv7a_common.debug_ap, a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
559 }
560
/* Wait (1 s timeout) until the core has pushed a word into the DCC
 * (DSCR.DTRTXfull) and read it into *data.  If dscr_p is non-NULL it is
 * used as the cached DSCR value on entry and updated with the last value
 * read, avoiding a redundant DSCR round-trip. */
static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
	uint32_t *dscr_p)
{
	struct adiv5_dap *swjdp = a->armv7a_common.arm.dap;
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
596
/* Establish the DPM invariant before issuing instructions: wait (1 s
 * timeout) for DSCR.InstrCompl, and drain a stale word from the DCC read
 * channel if one is unexpectedly pending. */
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	struct adiv5_dap *swjdp = a->armv7a_common.arm.dap;
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by reading it into R0: "MRC p14, 0, R0, c0, c5, 0" */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
634
/* Counterpart of cortex_a_dpm_prepare(), called when a DPM transaction
 * completes.  Nothing needs to be undone for Cortex-A at present. */
static int cortex_a_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
640
641 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
642 uint32_t opcode, uint32_t data)
643 {
644 struct cortex_a_common *a = dpm_to_a(dpm);
645 int retval;
646 uint32_t dscr = DSCR_INSTR_COMP;
647
648 retval = cortex_a_write_dcc(a, data);
649 if (retval != ERROR_OK)
650 return retval;
651
652 return cortex_a_exec_opcode(
653 a->armv7a_common.arm.target,
654 opcode,
655 &dscr);
656 }
657
/* Move 'data' into the core's R0 through the DCC, then execute 'opcode',
 * which is expected to consume R0.  R0 is clobbered. */
static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);

	return retval;
}
685
686 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
687 {
688 struct target *target = dpm->arm->target;
689 uint32_t dscr = DSCR_INSTR_COMP;
690
691 /* "Prefetch flush" after modifying execution status in CPSR */
692 return cortex_a_exec_opcode(target,
693 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
694 &dscr);
695 }
696
697 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
698 uint32_t opcode, uint32_t *data)
699 {
700 struct cortex_a_common *a = dpm_to_a(dpm);
701 int retval;
702 uint32_t dscr = DSCR_INSTR_COMP;
703
704 /* the opcode, writing data to DCC */
705 retval = cortex_a_exec_opcode(
706 a->armv7a_common.arm.target,
707 opcode,
708 &dscr);
709 if (retval != ERROR_OK)
710 return retval;
711
712 return cortex_a_read_dcc(a, data, &dscr);
713 }
714
715
/* Execute 'opcode' (expected to leave its result in R0), then move R0 out
 * through the DCC and read it into *data.  R0 is clobbered. */
static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* the opcode, writing data to R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC: "MCR p14, 0, R0, c0, c5, 0" */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}
741
742 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
743 uint32_t addr, uint32_t control)
744 {
745 struct cortex_a_common *a = dpm_to_a(dpm);
746 uint32_t vr = a->armv7a_common.debug_base;
747 uint32_t cr = a->armv7a_common.debug_base;
748 int retval;
749
750 switch (index_t) {
751 case 0 ... 15: /* breakpoints */
752 vr += CPUDBG_BVR_BASE;
753 cr += CPUDBG_BCR_BASE;
754 break;
755 case 16 ... 31: /* watchpoints */
756 vr += CPUDBG_WVR_BASE;
757 cr += CPUDBG_WCR_BASE;
758 index_t -= 16;
759 break;
760 default:
761 return ERROR_FAIL;
762 }
763 vr += 4 * index_t;
764 cr += 4 * index_t;
765
766 LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
767 (unsigned) vr, (unsigned) cr);
768
769 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
770 vr, addr);
771 if (retval != ERROR_OK)
772 return retval;
773 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
774 cr, control);
775 return retval;
776 }
777
778 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
779 {
780 struct cortex_a_common *a = dpm_to_a(dpm);
781 uint32_t cr;
782
783 switch (index_t) {
784 case 0 ... 15:
785 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
786 break;
787 case 16 ... 31:
788 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
789 index_t -= 16;
790 break;
791 default:
792 return ERROR_FAIL;
793 }
794 cr += 4 * index_t;
795
796 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
797
798 /* clear control register */
799 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
800 }
801
802 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
803 {
804 struct arm_dpm *dpm = &a->armv7a_common.dpm;
805 int retval;
806
807 dpm->arm = &a->armv7a_common.arm;
808 dpm->didr = didr;
809
810 dpm->prepare = cortex_a_dpm_prepare;
811 dpm->finish = cortex_a_dpm_finish;
812
813 dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
814 dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
815 dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
816
817 dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
818 dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
819
820 dpm->bpwp_enable = cortex_a_bpwp_enable;
821 dpm->bpwp_disable = cortex_a_bpwp_disable;
822
823 retval = arm_dpm_setup(dpm);
824 if (retval == ERROR_OK)
825 retval = arm_dpm_initialize(dpm);
826
827 return retval;
828 }
829 static struct target *get_cortex_a(struct target *target, int32_t coreid)
830 {
831 struct target_list *head;
832 struct target *curr;
833
834 head = target->head;
835 while (head != (struct target_list *)NULL) {
836 curr = head->target;
837 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
838 return curr;
839 head = head->next;
840 }
841 return target;
842 }
843 static int cortex_a_halt(struct target *target);
844
845 static int cortex_a_halt_smp(struct target *target)
846 {
847 int retval = 0;
848 struct target_list *head;
849 struct target *curr;
850 head = target->head;
851 while (head != (struct target_list *)NULL) {
852 curr = head->target;
853 if ((curr != target) && (curr->state != TARGET_HALTED))
854 retval += cortex_a_halt(curr);
855 head = head->next;
856 }
857 return retval;
858 }
859
860 static int update_halt_gdb(struct target *target)
861 {
862 int retval = 0;
863 if (target->gdb_service && target->gdb_service->core[0] == -1) {
864 target->gdb_service->target = target;
865 target->gdb_service->core[0] = target->coreid;
866 retval += cortex_a_halt_smp(target);
867 }
868 return retval;
869 }
870
871 /*
872 * Cortex-A Run control
873 */
874
/* Sample DSCR and update target->state accordingly.  On a fresh halt,
 * enter debug state, and for SMP configurations halt the remaining cores
 * and notify gdb. */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follows */
	/* maint packet J core_id */
	/* continue */
	/* the next polling triggers a halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR value for later inspection (e.g. debug reason) */
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				/* debug-run halts report a different event */
				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
945
/* Request a core halt: write DRCR.HALT, enable halting debug mode in DSCR,
 * then poll DSCR (1 s timeout) until the core reports halted. */
static int cortex_a_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enter halting debug mode (read-modify-write of DSCR)
	 */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_HALTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
993
/* Prepare the core to resume: compute the resume PC, restore the saved
 * CP15 control register and register context, and mark the target as
 * running.  Does not restart the core itself; that is done separately by
 * cortex_a_internal_restart().
 *
 *   current            : 1 = resume at current PC, 0 = resume at *address
 *   address            : in/out resume address (updated with the PC used)
 *   handle_breakpoints : forwarded to cortex_a_restore_context()
 *   debug_execution    : when false, all working areas are freed first
 */
static int cortex_a_internal_restore(struct target *target, int current,
	uint32_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM state: instruction addresses are word aligned */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* call it now, before restoring context, because it uses cpu
	 * register r0 for restoring the cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
1087
/**
 * Restart the core after cortex_a_internal_restore() has staged its state.
 * Clears ITRen and the sticky exception flags (see ARMv7 ARM C5.9),
 * requests a restart via DRCR, then polls DSCR until the core reports
 * "restarted" (1 s timeout).
 */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct adiv5_dap *swjdp = arm->dap;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* any instruction issued through ITR must have completed by now */
	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* disable ITR before requesting restart */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* wait (up to 1 s) for the core to acknowledge the restart */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1144
1145 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
1146 {
1147 int retval = 0;
1148 struct target_list *head;
1149 struct target *curr;
1150 uint32_t address;
1151 head = target->head;
1152 while (head != (struct target_list *)NULL) {
1153 curr = head->target;
1154 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1155 /* resume current address , not in step mode */
1156 retval += cortex_a_internal_restore(curr, 1, &address,
1157 handle_breakpoints, 0);
1158 retval += cortex_a_internal_restart(curr);
1159 }
1160 head = head->next;
1161
1162 }
1163 return retval;
1164 }
1165
1166 static int cortex_a_resume(struct target *target, int current,
1167 uint32_t address, int handle_breakpoints, int debug_execution)
1168 {
1169 int retval = 0;
1170 /* dummy resume for smp toggle in order to reduce gdb impact */
1171 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1172 /* simulate a start and halt of target */
1173 target->gdb_service->target = NULL;
1174 target->gdb_service->core[0] = target->gdb_service->core[1];
1175 /* fake resume at next poll we play the target core[1], see poll*/
1176 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1177 return 0;
1178 }
1179 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1180 if (target->smp) {
1181 target->gdb_service->core[0] = -1;
1182 retval = cortex_a_restore_smp(target, handle_breakpoints);
1183 if (retval != ERROR_OK)
1184 return retval;
1185 }
1186 cortex_a_internal_restart(target);
1187
1188 if (!debug_execution) {
1189 target->state = TARGET_RUNNING;
1190 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1191 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1192 } else {
1193 target->state = TARGET_DEBUG_RUNNING;
1194 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1195 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1196 }
1197
1198 return ERROR_OK;
1199 }
1200
/**
 * Called when the core has entered debug state: enable ITR, determine
 * the debug reason, read back the core registers (either one at a time
 * via the DPM or, with fast_reg_read, in bulk through a working area),
 * and finally run the target-specific post_debug_entry hook.
 */
static int cortex_a_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
		/* slow path: read registers one by one through the DPM */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	else {
		/* fast path: have the core dump r0..r15 into the working area */
		retval = cortex_a_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
			return retval;

		/* read Current PSR */
		retval = cortex_a_dap_read_coreregister_u32(target, &cpsr, 16);
		/* store current cpsr */
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(arm, cpsr);

		/* update cache */
		for (i = 0; i <= ARM_PC; i++) {
			reg = arm_reg_current(arm, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address */
		if (cpsr & (1 << 5)) {
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		} else {
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = arm->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1321
1322 static int cortex_a_post_debug_entry(struct target *target)
1323 {
1324 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1325 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1326 int retval;
1327
1328 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1329 retval = armv7a->arm.mrc(target, 15,
1330 0, 0, /* op1, op2 */
1331 1, 0, /* CRn, CRm */
1332 &cortex_a->cp15_control_reg);
1333 if (retval != ERROR_OK)
1334 return retval;
1335 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1336 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1337
1338 if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1339 armv7a_identify_cache(target);
1340
1341 if (armv7a->is_armv7r) {
1342 armv7a->armv7a_mmu.mmu_enabled = 0;
1343 } else {
1344 armv7a->armv7a_mmu.mmu_enabled =
1345 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1346 }
1347 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1348 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1349 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1350 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1351 cortex_a->curr_mode = armv7a->arm.core_mode;
1352
1353 return ERROR_OK;
1354 }
1355
1356 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1357 {
1358 struct armv7a_common *armv7a = target_to_armv7a(target);
1359 struct adiv5_dap *swjdp = armv7a->arm.dap;
1360 uint32_t dscr;
1361
1362 /* Read DSCR */
1363 int retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1364 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1365 if (ERROR_OK != retval)
1366 return retval;
1367
1368 /* clear bitfield */
1369 dscr &= ~bit_mask;
1370 /* put new value */
1371 dscr |= value & bit_mask;
1372
1373 /* write new DSCR */
1374 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1375 armv7a->debug_base + CPUDBG_DSCR, dscr);
1376 return retval;
1377 }
1378
/**
 * Single-step one instruction. Cortex-A has no dedicated step mode, so
 * this plants a hardware breakpoint in "IVA mismatch" mode (matchmode
 * 0x04) at the step address, resumes, and waits for the core to halt
 * on the first instruction at a different address.
 */
static int cortex_a_step(struct target *target, int current, uint32_t address,
	int handle_breakpoints)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct breakpoint *breakpoint = NULL;
	struct breakpoint stepbreakpoint;
	struct reg *r;
	int retval;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = arm->pc;
	if (!current)
		buf_set_u32(r->value, 0, 32, address);
	else
		address = buf_get_u32(r->value, 0, 32);

	/* The front-end may request us not to handle breakpoints.
	 * But since Cortex-A uses breakpoint for single step,
	 * we MUST handle breakpoints.
	 */
	handle_breakpoints = 1;
	if (handle_breakpoints) {
		/* temporarily remove a user breakpoint at the step address,
		 * or the mismatch breakpoint could never be hit */
		breakpoint = breakpoint_find(target, address);
		if (breakpoint)
			cortex_a_unset_breakpoint(target, breakpoint);
	}

	/* Setup single step breakpoint */
	stepbreakpoint.address = address;
	stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
		? 2 : 4;
	stepbreakpoint.type = BKPT_HARD;
	stepbreakpoint.set = 0;

	/* Disable interrupts during single step if requested */
	if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
		retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
		if (ERROR_OK != retval)
			return retval;
	}

	/* Break on IVA mismatch */
	cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	retval = cortex_a_resume(target, 1, address, 0, 0);
	if (retval != ERROR_OK)
		return retval;

	/* poll (up to 1 s) until the core halts on the mismatch breakpoint */
	long long then = timeval_ms();
	while (target->state != TARGET_HALTED) {
		retval = cortex_a_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("timeout waiting for target halt");
			return ERROR_FAIL;
		}
	}

	cortex_a_unset_breakpoint(target, &stepbreakpoint);

	/* Re-enable interrupts if they were disabled */
	if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
		retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
		if (ERROR_OK != retval)
			return retval;
	}


	target->debug_reason = DBG_REASON_BREAKPOINT;

	/* re-plant the user breakpoint removed above */
	if (breakpoint)
		cortex_a_set_breakpoint(target, breakpoint, 0);

	/* NOTE(review): this condition looks inverted -- the wait loop above
	 * only exits once target->state == TARGET_HALTED, so this message can
	 * never be printed; confirm whether "==" was intended. */
	if (target->state != TARGET_HALTED)
		LOG_DEBUG("target stepped");

	return ERROR_OK;
}
1467
1468 static int cortex_a_restore_context(struct target *target, bool bpwp)
1469 {
1470 struct armv7a_common *armv7a = target_to_armv7a(target);
1471
1472 LOG_DEBUG(" ");
1473
1474 if (armv7a->pre_restore_context)
1475 armv7a->pre_restore_context(target);
1476
1477 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1478 }
1479
1480 /*
1481 * Cortex-A Breakpoint and watchpoint functions
1482 */
1483
1484 /* Setup hardware Breakpoint Register Pair */
1485 static int cortex_a_set_breakpoint(struct target *target,
1486 struct breakpoint *breakpoint, uint8_t matchmode)
1487 {
1488 int retval;
1489 int brp_i = 0;
1490 uint32_t control;
1491 uint8_t byte_addr_select = 0x0F;
1492 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1493 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1494 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1495
1496 if (breakpoint->set) {
1497 LOG_WARNING("breakpoint already set");
1498 return ERROR_OK;
1499 }
1500
1501 if (breakpoint->type == BKPT_HARD) {
1502 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1503 brp_i++;
1504 if (brp_i >= cortex_a->brp_num) {
1505 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1506 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1507 }
1508 breakpoint->set = brp_i + 1;
1509 if (breakpoint->length == 2)
1510 byte_addr_select = (3 << (breakpoint->address & 0x02));
1511 control = ((matchmode & 0x7) << 20)
1512 | (byte_addr_select << 5)
1513 | (3 << 1) | 1;
1514 brp_list[brp_i].used = 1;
1515 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1516 brp_list[brp_i].control = control;
1517 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1518 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1519 brp_list[brp_i].value);
1520 if (retval != ERROR_OK)
1521 return retval;
1522 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1523 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1524 brp_list[brp_i].control);
1525 if (retval != ERROR_OK)
1526 return retval;
1527 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1528 brp_list[brp_i].control,
1529 brp_list[brp_i].value);
1530 } else if (breakpoint->type == BKPT_SOFT) {
1531 uint8_t code[4];
1532 if (breakpoint->length == 2)
1533 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1534 else
1535 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1536 retval = target_read_memory(target,
1537 breakpoint->address & 0xFFFFFFFE,
1538 breakpoint->length, 1,
1539 breakpoint->orig_instr);
1540 if (retval != ERROR_OK)
1541 return retval;
1542
1543 /* make sure data cache is cleaned & invalidated down to PoC */
1544 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1545 armv7a_cache_flush_virt(target, breakpoint->address,
1546 breakpoint->length);
1547 }
1548
1549 retval = target_write_memory(target,
1550 breakpoint->address & 0xFFFFFFFE,
1551 breakpoint->length, 1, code);
1552 if (retval != ERROR_OK)
1553 return retval;
1554
1555 /* update i-cache at breakpoint location */
1556 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1557 breakpoint->length);
1558 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1559 breakpoint->length);
1560
1561 breakpoint->set = 0x11; /* Any nice value but 0 */
1562 }
1563
1564 return ERROR_OK;
1565 }
1566
1567 static int cortex_a_set_context_breakpoint(struct target *target,
1568 struct breakpoint *breakpoint, uint8_t matchmode)
1569 {
1570 int retval = ERROR_FAIL;
1571 int brp_i = 0;
1572 uint32_t control;
1573 uint8_t byte_addr_select = 0x0F;
1574 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1575 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1576 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1577
1578 if (breakpoint->set) {
1579 LOG_WARNING("breakpoint already set");
1580 return retval;
1581 }
1582 /*check available context BRPs*/
1583 while ((brp_list[brp_i].used ||
1584 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1585 brp_i++;
1586
1587 if (brp_i >= cortex_a->brp_num) {
1588 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1589 return ERROR_FAIL;
1590 }
1591
1592 breakpoint->set = brp_i + 1;
1593 control = ((matchmode & 0x7) << 20)
1594 | (byte_addr_select << 5)
1595 | (3 << 1) | 1;
1596 brp_list[brp_i].used = 1;
1597 brp_list[brp_i].value = (breakpoint->asid);
1598 brp_list[brp_i].control = control;
1599 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1600 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1601 brp_list[brp_i].value);
1602 if (retval != ERROR_OK)
1603 return retval;
1604 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1605 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1606 brp_list[brp_i].control);
1607 if (retval != ERROR_OK)
1608 return retval;
1609 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1610 brp_list[brp_i].control,
1611 brp_list[brp_i].value);
1612 return ERROR_OK;
1613
1614 }
1615
1616 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1617 {
1618 int retval = ERROR_FAIL;
1619 int brp_1 = 0; /* holds the contextID pair */
1620 int brp_2 = 0; /* holds the IVA pair */
1621 uint32_t control_CTX, control_IVA;
1622 uint8_t CTX_byte_addr_select = 0x0F;
1623 uint8_t IVA_byte_addr_select = 0x0F;
1624 uint8_t CTX_machmode = 0x03;
1625 uint8_t IVA_machmode = 0x01;
1626 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1627 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1628 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1629
1630 if (breakpoint->set) {
1631 LOG_WARNING("breakpoint already set");
1632 return retval;
1633 }
1634 /*check available context BRPs*/
1635 while ((brp_list[brp_1].used ||
1636 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1637 brp_1++;
1638
1639 printf("brp(CTX) found num: %d\n", brp_1);
1640 if (brp_1 >= cortex_a->brp_num) {
1641 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1642 return ERROR_FAIL;
1643 }
1644
1645 while ((brp_list[brp_2].used ||
1646 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1647 brp_2++;
1648
1649 printf("brp(IVA) found num: %d\n", brp_2);
1650 if (brp_2 >= cortex_a->brp_num) {
1651 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1652 return ERROR_FAIL;
1653 }
1654
1655 breakpoint->set = brp_1 + 1;
1656 breakpoint->linked_BRP = brp_2;
1657 control_CTX = ((CTX_machmode & 0x7) << 20)
1658 | (brp_2 << 16)
1659 | (0 << 14)
1660 | (CTX_byte_addr_select << 5)
1661 | (3 << 1) | 1;
1662 brp_list[brp_1].used = 1;
1663 brp_list[brp_1].value = (breakpoint->asid);
1664 brp_list[brp_1].control = control_CTX;
1665 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1666 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1667 brp_list[brp_1].value);
1668 if (retval != ERROR_OK)
1669 return retval;
1670 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1671 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1672 brp_list[brp_1].control);
1673 if (retval != ERROR_OK)
1674 return retval;
1675
1676 control_IVA = ((IVA_machmode & 0x7) << 20)
1677 | (brp_1 << 16)
1678 | (IVA_byte_addr_select << 5)
1679 | (3 << 1) | 1;
1680 brp_list[brp_2].used = 1;
1681 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1682 brp_list[brp_2].control = control_IVA;
1683 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1684 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1685 brp_list[brp_2].value);
1686 if (retval != ERROR_OK)
1687 return retval;
1688 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1689 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1690 brp_list[brp_2].control);
1691 if (retval != ERROR_OK)
1692 return retval;
1693
1694 return ERROR_OK;
1695 }
1696
1697 static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1698 {
1699 int retval;
1700 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1701 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1702 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1703
1704 if (!breakpoint->set) {
1705 LOG_WARNING("breakpoint not set");
1706 return ERROR_OK;
1707 }
1708
1709 if (breakpoint->type == BKPT_HARD) {
1710 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1711 int brp_i = breakpoint->set - 1;
1712 int brp_j = breakpoint->linked_BRP;
1713 if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1714 LOG_DEBUG("Invalid BRP number in breakpoint");
1715 return ERROR_OK;
1716 }
1717 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1718 brp_list[brp_i].control, brp_list[brp_i].value);
1719 brp_list[brp_i].used = 0;
1720 brp_list[brp_i].value = 0;
1721 brp_list[brp_i].control = 0;
1722 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1723 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1724 brp_list[brp_i].control);
1725 if (retval != ERROR_OK)
1726 return retval;
1727 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1728 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1729 brp_list[brp_i].value);
1730 if (retval != ERROR_OK)
1731 return retval;
1732 if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
1733 LOG_DEBUG("Invalid BRP number in breakpoint");
1734 return ERROR_OK;
1735 }
1736 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1737 brp_list[brp_j].control, brp_list[brp_j].value);
1738 brp_list[brp_j].used = 0;
1739 brp_list[brp_j].value = 0;
1740 brp_list[brp_j].control = 0;
1741 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1742 + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
1743 brp_list[brp_j].control);
1744 if (retval != ERROR_OK)
1745 return retval;
1746 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1747 + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
1748 brp_list[brp_j].value);
1749 if (retval != ERROR_OK)
1750 return retval;
1751 breakpoint->linked_BRP = 0;
1752 breakpoint->set = 0;
1753 return ERROR_OK;
1754
1755 } else {
1756 int brp_i = breakpoint->set - 1;
1757 if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1758 LOG_DEBUG("Invalid BRP number in breakpoint");
1759 return ERROR_OK;
1760 }
1761 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1762 brp_list[brp_i].control, brp_list[brp_i].value);
1763 brp_list[brp_i].used = 0;
1764 brp_list[brp_i].value = 0;
1765 brp_list[brp_i].control = 0;
1766 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1767 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1768 brp_list[brp_i].control);
1769 if (retval != ERROR_OK)
1770 return retval;
1771 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1772 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1773 brp_list[brp_i].value);
1774 if (retval != ERROR_OK)
1775 return retval;
1776 breakpoint->set = 0;
1777 return ERROR_OK;
1778 }
1779 } else {
1780
1781 /* make sure data cache is cleaned & invalidated down to PoC */
1782 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1783 armv7a_cache_flush_virt(target, breakpoint->address,
1784 breakpoint->length);
1785 }
1786
1787 /* restore original instruction (kept in target endianness) */
1788 if (breakpoint->length == 4) {
1789 retval = target_write_memory(target,
1790 breakpoint->address & 0xFFFFFFFE,
1791 4, 1, breakpoint->orig_instr);
1792 if (retval != ERROR_OK)
1793 return retval;
1794 } else {
1795 retval = target_write_memory(target,
1796 breakpoint->address & 0xFFFFFFFE,
1797 2, 1, breakpoint->orig_instr);
1798 if (retval != ERROR_OK)
1799 return retval;
1800 }
1801
1802 /* update i-cache at breakpoint location */
1803 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1804 breakpoint->length);
1805 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1806 breakpoint->length);
1807 }
1808 breakpoint->set = 0;
1809
1810 return ERROR_OK;
1811 }
1812
1813 static int cortex_a_add_breakpoint(struct target *target,
1814 struct breakpoint *breakpoint)
1815 {
1816 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1817
1818 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1819 LOG_INFO("no hardware breakpoint available");
1820 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1821 }
1822
1823 if (breakpoint->type == BKPT_HARD)
1824 cortex_a->brp_num_available--;
1825
1826 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1827 }
1828
1829 static int cortex_a_add_context_breakpoint(struct target *target,
1830 struct breakpoint *breakpoint)
1831 {
1832 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1833
1834 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1835 LOG_INFO("no hardware breakpoint available");
1836 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1837 }
1838
1839 if (breakpoint->type == BKPT_HARD)
1840 cortex_a->brp_num_available--;
1841
1842 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1843 }
1844
1845 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1846 struct breakpoint *breakpoint)
1847 {
1848 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1849
1850 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1851 LOG_INFO("no hardware breakpoint available");
1852 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1853 }
1854
1855 if (breakpoint->type == BKPT_HARD)
1856 cortex_a->brp_num_available--;
1857
1858 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1859 }
1860
1861
1862 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1863 {
1864 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1865
1866 #if 0
1867 /* It is perfectly possible to remove breakpoints while the target is running */
1868 if (target->state != TARGET_HALTED) {
1869 LOG_WARNING("target not halted");
1870 return ERROR_TARGET_NOT_HALTED;
1871 }
1872 #endif
1873
1874 if (breakpoint->set) {
1875 cortex_a_unset_breakpoint(target, breakpoint);
1876 if (breakpoint->type == BKPT_HARD)
1877 cortex_a->brp_num_available++;
1878 }
1879
1880
1881 return ERROR_OK;
1882 }
1883
1884 /*
1885 * Cortex-A Reset functions
1886 */
1887
1888 static int cortex_a_assert_reset(struct target *target)
1889 {
1890 struct armv7a_common *armv7a = target_to_armv7a(target);
1891
1892 LOG_DEBUG(" ");
1893
1894 /* FIXME when halt is requested, make it work somehow... */
1895
1896 /* Issue some kind of warm reset. */
1897 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1898 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1899 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1900 /* REVISIT handle "pulls" cases, if there's
1901 * hardware that needs them to work.
1902 */
1903 jtag_add_reset(0, 1);
1904 } else {
1905 LOG_ERROR("%s: how to reset?", target_name(target));
1906 return ERROR_FAIL;
1907 }
1908
1909 /* registers are now invalid */
1910 register_cache_invalidate(armv7a->arm.core_cache);
1911
1912 target->state = TARGET_RESET;
1913
1914 return ERROR_OK;
1915 }
1916
1917 static int cortex_a_deassert_reset(struct target *target)
1918 {
1919 int retval;
1920
1921 LOG_DEBUG(" ");
1922
1923 /* be certain SRST is off */
1924 jtag_add_reset(0, 0);
1925
1926 retval = cortex_a_poll(target);
1927 if (retval != ERROR_OK)
1928 return retval;
1929
1930 if (target->reset_halt) {
1931 if (target->state != TARGET_HALTED) {
1932 LOG_WARNING("%s: ran after reset and before halt ...",
1933 target_name(target));
1934 retval = target_halt(target);
1935 if (retval != ERROR_OK)
1936 return retval;
1937 }
1938 }
1939
1940 return ERROR_OK;
1941 }
1942
1943 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1944 {
1945 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1946 * New desired mode must be in mode. Current value of DSCR must be in
1947 * *dscr, which is updated with new value.
1948 *
1949 * This function elides actually sending the mode-change over the debug
1950 * interface if the mode is already set as desired.
1951 */
1952 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1953 if (new_dscr != *dscr) {
1954 struct armv7a_common *armv7a = target_to_armv7a(target);
1955 int retval = mem_ap_sel_write_atomic_u32(armv7a->arm.dap,
1956 armv7a->debug_ap, armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1957 if (retval == ERROR_OK)
1958 *dscr = new_dscr;
1959 return retval;
1960 } else {
1961 return ERROR_OK;
1962 }
1963 }
1964
1965 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1966 uint32_t value, uint32_t *dscr)
1967 {
1968 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1969 struct armv7a_common *armv7a = target_to_armv7a(target);
1970 struct adiv5_dap *swjdp = armv7a->arm.dap;
1971 long long then = timeval_ms();
1972 int retval;
1973
1974 while ((*dscr & mask) != value) {
1975 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1976 armv7a->debug_base + CPUDBG_DSCR, dscr);
1977 if (retval != ERROR_OK)
1978 return retval;
1979 if (timeval_ms() > then + 1000) {
1980 LOG_ERROR("timeout waiting for DSCR bit change");
1981 return ERROR_FAIL;
1982 }
1983 }
1984 return ERROR_OK;
1985 }
1986
/**
 * Execute 'opcode' (expected to leave its result in R0) on the halted
 * core via the ITR, then fetch the result through the DTRTX channel.
 * *dscr is the caller's cached DSCR and is kept up to date.
 */
static int cortex_a_read_copro(struct target *target, uint32_t opcode,
	uint32_t *data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/* Move from coprocessor to R0. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
	 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
	 * must also check TXfull_l). Most of the time this will be free
	 * because TXfull_l will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX. */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2021
2022 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
2023 uint32_t *dfsr, uint32_t *dscr)
2024 {
2025 int retval;
2026
2027 if (dfar) {
2028 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
2029 if (retval != ERROR_OK)
2030 return retval;
2031 }
2032
2033 if (dfsr) {
2034 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2035 if (retval != ERROR_OK)
2036 return retval;
2037 }
2038
2039 return ERROR_OK;
2040 }
2041
/* Write a coprocessor register by pushing data through DTRRX and
 * executing opcode on the target.
 *
 * opcode must be an instruction that consumes R0 (R0 is clobbered;
 * callers mark it dirty).  *dscr caches the current DSCR value and is
 * updated as a side effect.  The sequence below is order-sensitive:
 * fill DTRRX, load R0 from it, execute, then wait for RXfull_l clear.
 */
static int cortex_a_write_copro(struct target *target, uint32_t opcode,
	uint32_t data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/* Write the value into DTRRX. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, data);
	if (retval != ERROR_OK)
		return retval;

	/* Move from DTRRX to R0. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to coprocessor. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2075
2076 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2077 uint32_t dfsr, uint32_t *dscr)
2078 {
2079 int retval;
2080
2081 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2082 if (retval != ERROR_OK)
2083 return retval;
2084
2085 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2086 if (retval != ERROR_OK)
2087 return retval;
2088
2089 return ERROR_OK;
2090 }
2091
2092 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2093 {
2094 uint32_t status, upper4;
2095
2096 if (dfsr & (1 << 9)) {
2097 /* LPAE format. */
2098 status = dfsr & 0x3f;
2099 upper4 = status >> 2;
2100 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2101 return ERROR_TARGET_TRANSLATION_FAULT;
2102 else if (status == 33)
2103 return ERROR_TARGET_UNALIGNED_ACCESS;
2104 else
2105 return ERROR_TARGET_DATA_ABORT;
2106 } else {
2107 /* Normal format. */
2108 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2109 if (status == 1)
2110 return ERROR_TARGET_UNALIGNED_ACCESS;
2111 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2112 status == 9 || status == 11 || status == 13 || status == 15)
2113 return ERROR_TARGET_TRANSLATION_FAULT;
2114 else
2115 return ERROR_TARGET_DATA_ABORT;
2116 }
2117 }
2118
static int cortex_a_write_apb_ab_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size size from *buffer. Old value of DSCR must
	 * be in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_write_apb_ab_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Write the value to store into DTRRX. */
		uint32_t data, opcode;
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* Transfer the value from DTRRX to R1. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Write the value transferred to R1 into memory.
		 * NOTE(review): the *_IP store opcodes appear to post-increment
		 * the R0 address pointer -- confirm against the armv4_5 opcode
		 * macros. */
		if (size == 1)
			opcode = ARMV4_5_STRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_STRH_IP(1, 0);
		else
			opcode = ARMV4_5_STRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check RXfull_l). Most of the time this will be free
		 * because RXfull_l will be cleared immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2194
static int cortex_a_write_apb_ab_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size 4 from *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is fast but only works for
	 * word-sized objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	int retval;

	/* Switch to fast mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Latch STC instruction.  In fast mode the latched instruction is
	 * re-issued on every DTRRX write below, so one STC per data word is
	 * executed without touching ITR again. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
	if (retval != ERROR_OK)
		return retval;

	/* Transfer all the data and issue all the instructions.  The caller
	 * is responsible for waiting for completion and checking for faults
	 * afterwards. */
	return mem_ap_sel_write_buf_noincr(swjdp, armv7a->debug_ap, buffer,
			4, count, armv7a->debug_base + CPUDBG_DTRRX);
}
2224
/* Write target memory through the APB-AP using the DCC.
 *
 * Clears any pending abort, loads the start address into R0, then
 * dispatches to the fast inner loop (size 4, aligned address) or the
 * slow one (any size/alignment).  The cleanup path restores non-blocking
 * DCC mode, waits for the last instruction, decodes and reports data
 * aborts (restoring DFAR/DFSR, which a fault clobbers), and drains any
 * data left in the DCC.  Target must be halted.
 */
static int cortex_a_write_apb_ab_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* Write memory through APB-AP. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
		address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR. */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_write_apb_ab_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_write_apb_ab_memory_slow(target, size, count, buffer, &dscr);
	}

out:
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. However, don’t do this
	 * if there is fault, because then the instruction might not have completed
	 * successfully. */
	if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
		/* NOTE(review): an error here returns immediately, skipping the
		 * abort-clearing and DCC-drain steps below -- confirm that is
		 * intentional (the read path merges into final_retval instead). */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		/* NOTE(review): draining DTRRX clobbers R1, but only the slow
		 * path marks R1 dirty -- verify R1 is safe after the fast path. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2364
static int cortex_a_read_apb_ab_memory_slow(struct target *target,
	uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size size into *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_read_apb_ab_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Issue a load of the appropriate size to R1.
		 * NOTE(review): the *_IP load opcodes appear to post-increment
		 * the R0 address pointer -- confirm against the armv4_5 opcode
		 * macros. */
		uint32_t opcode, data;
		if (size == 1)
			opcode = ARMV4_5_LDRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_LDRH_IP(1, 0);
		else
			opcode = ARMV4_5_LDRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Issue a write of R1 to DTRTX. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check TXfull_l). Most of the time this will be free
		 * because TXfull_l will be set immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
				DSCR_DTRTX_FULL_LATCHED, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. */
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &data);
		if (retval != ERROR_OK)
			return retval;
		if (size == 1)
			*buffer = (uint8_t) data;
		else if (size == 2)
			target_buffer_set_u16(target, buffer, (uint16_t) data);
		else
			target_buffer_set_u32(target, buffer, data);

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2441
static int cortex_a_read_apb_ab_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
	 * *dscr; updated to new value. This is fast but only works for word-sized
	 * objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 * - count >= 1; with count == 0 the decrement below would wrap.
	 *   Callers check for an empty transfer before dispatching here.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	uint32_t u32;
	int retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Issue the LDC instruction via a write to ITR.  This starts the read
	 * of the first word; its result is collected at the end of this
	 * function, after the remaining count-1 words. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
	if (retval != ERROR_OK)
		return retval;

	count--;

	if (count > 0) {
		/* Switch to fast mode if not already in that mode. */
		retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Latch LDC instruction. */
		retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. Due to fast
		 * mode rules, this blocks until the instruction finishes executing and
		 * then reissues the read instruction to read the next word from
		 * memory. The last read of DTRTX in this call reads the second-to-last
		 * word from memory and issues the read instruction for the last word.
		 */
		retval = mem_ap_sel_read_buf_noincr(swjdp, armv7a->debug_ap, buffer,
				4, count, armv7a->debug_base + CPUDBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += count * 4;
	}

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, dscr, false);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Check for faults and return early. */
	if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
		return ERROR_OK; /* A data fault is not considered a system failure. */

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check TXfull_l). Most of the time this will be free because TXfull_l
	 * will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX into the buffer. This is the last
	 * word. */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &u32);
	if (retval != ERROR_OK)
		return retval;
	target_buffer_set_u32(target, buffer, u32);

	return ERROR_OK;
}
2529
/* Read target memory through the APB-AP using the DCC.
 *
 * Clears any pending abort, loads the start address into R0, then
 * dispatches to the fast inner loop (size 4, aligned address) or the
 * slow one (any size/alignment).  The cleanup path restores non-blocking
 * DCC mode, waits for the last instruction, decodes and reports data
 * aborts (restoring DFAR/DFSR, which a fault clobbers), and drains any
 * data left in the DCC.  Target must be halted.
 */
static int cortex_a_read_apb_ab_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through APB-AP. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
		address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_apb_ab_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_read_apb_ab_memory_slow(target, size, count, buffer, &dscr);
	}

out:
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		/* NOTE(review): draining DTRRX clobbers R1, but only the slow
		 * path marks R1 dirty -- verify R1 is safe after the fast path. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2657
2658
2659 /*
2660 * Cortex-A Memory access
2661 *
2662 * This is same Cortex M3 but we must also use the correct
2663 * ap number for every access.
2664 */
2665
2666 static int cortex_a_read_phys_memory(struct target *target,
2667 uint32_t address, uint32_t size,
2668 uint32_t count, uint8_t *buffer)
2669 {
2670 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2671
2672 LOG_DEBUG("Reading memory at real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32,
2673 address, size, count);
2674
2675 if (count && buffer) {
2676 /* read memory through APB-AP */
2677 cortex_a_prep_memaccess(target, 1);
2678 retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
2679 cortex_a_post_memaccess(target, 1);
2680 }
2681 return retval;
2682 }
2683
2684 static int cortex_a_read_memory(struct target *target, uint32_t address,
2685 uint32_t size, uint32_t count, uint8_t *buffer)
2686 {
2687 int retval;
2688
2689 /* cortex_a handles unaligned memory access */
2690 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2691 size, count);
2692
2693 cortex_a_prep_memaccess(target, 0);
2694 retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
2695 cortex_a_post_memaccess(target, 0);
2696
2697 return retval;
2698 }
2699
2700 static int cortex_a_read_memory_ahb(struct target *target, uint32_t address,
2701 uint32_t size, uint32_t count, uint8_t *buffer)
2702 {
2703 int mmu_enabled = 0;
2704 uint32_t virt, phys;
2705 int retval;
2706 struct armv7a_common *armv7a = target_to_armv7a(target);
2707 struct adiv5_dap *swjdp = armv7a->arm.dap;
2708 uint8_t apsel = swjdp->apsel;
2709
2710 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap))
2711 return target_read_memory(target, address, size, count, buffer);
2712
2713 /* cortex_a handles unaligned memory access */
2714 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2715 size, count);
2716
2717 /* determine if MMU was enabled on target stop */
2718 if (!armv7a->is_armv7r) {
2719 retval = cortex_a_mmu(target, &mmu_enabled);
2720 if (retval != ERROR_OK)
2721 return retval;
2722 }
2723
2724 if (mmu_enabled) {
2725 virt = address;
2726 retval = cortex_a_virt2phys(target, virt, &phys);
2727 if (retval != ERROR_OK)
2728 return retval;
2729
2730 LOG_DEBUG("Reading at virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2731 virt, phys);
2732 address = phys;
2733 }
2734
2735 if (!count || !buffer)
2736 return ERROR_COMMAND_SYNTAX_ERROR;
2737
2738 retval = mem_ap_sel_read_buf(swjdp, armv7a->memory_ap, buffer, size, count, address);
2739
2740 return retval;
2741 }
2742
2743 static int cortex_a_write_phys_memory(struct target *target,
2744 uint32_t address, uint32_t size,
2745 uint32_t count, const uint8_t *buffer)
2746 {
2747 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2748
2749 LOG_DEBUG("Writing memory to real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2750 size, count);
2751
2752 if (count && buffer) {
2753 /* write memory through APB-AP */
2754 cortex_a_prep_memaccess(target, 1);
2755 retval = cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
2756 cortex_a_post_memaccess(target, 1);
2757 }
2758
2759 return retval;
2760 }
2761
2762 static int cortex_a_write_memory(struct target *target, uint32_t address,
2763 uint32_t size, uint32_t count, const uint8_t *buffer)
2764 {
2765 int retval;
2766
2767 /* cortex_a handles unaligned memory access */
2768 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2769 size, count);
2770
2771 /* memory writes bypass the caches, must flush before writing */
2772 armv7a_cache_auto_flush_on_write(target, address, size * count);
2773
2774 cortex_a_prep_memaccess(target, 0);
2775 retval = cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
2776 cortex_a_post_memaccess(target, 0);
2777 return retval;
2778 }
2779
2780 static int cortex_a_write_memory_ahb(struct target *target, uint32_t address,
2781 uint32_t size, uint32_t count, const uint8_t *buffer)
2782 {
2783 int mmu_enabled = 0;
2784 uint32_t virt, phys;
2785 int retval;
2786 struct armv7a_common *armv7a = target_to_armv7a(target);
2787 struct adiv5_dap *swjdp = armv7a->arm.dap;
2788 uint8_t apsel = swjdp->apsel;
2789
2790 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap))
2791 return target_write_memory(target, address, size, count, buffer);
2792
2793 /* cortex_a handles unaligned memory access */
2794 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2795 size, count);
2796
2797 /* determine if MMU was enabled on target stop */
2798 if (!armv7a->is_armv7r) {
2799 retval = cortex_a_mmu(target, &mmu_enabled);
2800 if (retval != ERROR_OK)
2801 return retval;
2802 }
2803
2804 if (mmu_enabled) {
2805 virt = address;
2806 retval = cortex_a_virt2phys(target, virt, &phys);
2807 if (retval != ERROR_OK)
2808 return retval;
2809
2810 LOG_DEBUG("Writing to virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2811 virt,
2812 phys);
2813 address = phys;
2814 }
2815
2816 if (!count || !buffer)
2817 return ERROR_COMMAND_SYNTAX_ERROR;
2818
2819 retval = mem_ap_sel_write_buf(swjdp, armv7a->memory_ap, buffer, size, count, address);
2820
2821 return retval;
2822 }
2823
2824 static int cortex_a_read_buffer(struct target *target, uint32_t address,
2825 uint32_t count, uint8_t *buffer)
2826 {
2827 uint32_t size;
2828
2829 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2830 * will have something to do with the size we leave to it. */
2831 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2832 if (address & size) {
2833 int retval = cortex_a_read_memory_ahb(target, address, size, 1, buffer);
2834 if (retval != ERROR_OK)
2835 return retval;
2836 address += size;
2837 count -= size;
2838 buffer += size;
2839 }
2840 }
2841
2842 /* Read the data with as large access size as possible. */
2843 for (; size > 0; size /= 2) {
2844 uint32_t aligned = count - count % size;
2845 if (aligned > 0) {
2846 int retval = cortex_a_read_memory_ahb(target, address, size, aligned / size, buffer);
2847 if (retval != ERROR_OK)
2848 return retval;
2849 address += aligned;
2850 count -= aligned;
2851 buffer += aligned;
2852 }
2853 }
2854
2855 return ERROR_OK;
2856 }
2857
2858 static int cortex_a_write_buffer(struct target *target, uint32_t address,
2859 uint32_t count, const uint8_t *buffer)
2860 {
2861 uint32_t size;
2862
2863 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2864 * will have something to do with the size we leave to it. */
2865 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2866 if (address & size) {
2867 int retval = cortex_a_write_memory_ahb(target, address, size, 1, buffer);
2868 if (retval != ERROR_OK)
2869 return retval;
2870 address += size;
2871 count -= size;
2872 buffer += size;
2873 }
2874 }
2875
2876 /* Write the data with as large access size as possible. */
2877 for (; size > 0; size /= 2) {
2878 uint32_t aligned = count - count % size;
2879 if (aligned > 0) {
2880 int retval = cortex_a_write_memory_ahb(target, address, size, aligned / size, buffer);
2881 if (retval != ERROR_OK)
2882 return retval;
2883 address += aligned;
2884 count -= aligned;
2885 buffer += aligned;
2886 }
2887 }
2888
2889 return ERROR_OK;
2890 }
2891
2892 static int cortex_a_handle_target_request(void *priv)
2893 {
2894 struct target *target = priv;
2895 struct armv7a_common *armv7a = target_to_armv7a(target);
2896 struct adiv5_dap *swjdp = armv7a->arm.dap;
2897 int retval;
2898
2899 if (!target_was_examined(target))
2900 return ERROR_OK;
2901 if (!target->dbg_msg_enabled)
2902 return ERROR_OK;
2903
2904 if (target->state == TARGET_RUNNING) {
2905 uint32_t request;
2906 uint32_t dscr;
2907 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2908 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2909
2910 /* check if we have data */
2911 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2912 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2913 armv7a->debug_base + CPUDBG_DTRTX, &request);
2914 if (retval == ERROR_OK) {
2915 target_request(target, request);
2916 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2917 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2918 }
2919 }
2920 }
2921
2922 return ERROR_OK;
2923 }
2924
2925 /*
2926 * Cortex-A target information and configuration
2927 */
2928
2929 static int cortex_a_examine_first(struct target *target)
2930 {
2931 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2932 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2933 struct adiv5_dap *swjdp = armv7a->arm.dap;
2934 int i;
2935 int retval = ERROR_OK;
2936 uint32_t didr, ctypr, ttypr, cpuid, dbg_osreg;
2937
2938 /* We do one extra read to ensure DAP is configured,
2939 * we call ahbap_debugport_init(swjdp) instead
2940 */
2941 retval = ahbap_debugport_init(swjdp, 0);
2942 if (retval != ERROR_OK)
2943 return retval;
2944
2945 /* Search for the APB-AB - it is needed for access to debug registers */
2946 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2947 if (retval != ERROR_OK) {
2948 LOG_ERROR("Could not find APB-AP for debug access");
2949 return retval;
2950 }
2951 /* Search for the AHB-AB */
2952 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
2953 if (retval != ERROR_OK) {
2954 /* AHB-AP not found - use APB-AP */
2955 LOG_DEBUG("Could not find AHB-AP - using APB-AP for memory access");
2956 armv7a->memory_ap_available = false;
2957 } else {
2958 armv7a->memory_ap_available = true;
2959 }
2960
2961
2962 if (!target->dbgbase_set) {
2963 uint32_t dbgbase;
2964 /* Get ROM Table base */
2965 uint32_t apid;
2966 int32_t coreidx = target->coreid;
2967 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2968 target->cmd_name);
2969 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
2970 if (retval != ERROR_OK)
2971 return retval;
2972 /* Lookup 0x15 -- Processor DAP */
2973 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
2974 &armv7a->debug_base, &coreidx);
2975 if (retval != ERROR_OK) {
2976 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2977 target->cmd_name);
2978 return retval;
2979 }
2980 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2981 target->coreid, armv7a->debug_base);
2982 } else
2983 armv7a->debug_base = target->dbgbase;
2984
2985 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2986 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2987 if (retval != ERROR_OK)
2988 return retval;
2989
2990 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2991 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2992 if (retval != ERROR_OK) {
2993 LOG_DEBUG("Examine %s failed", "CPUID");
2994 return retval;
2995 }
2996
2997 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2998 armv7a->debug_base + CPUDBG_CTYPR, &ctypr);
2999 if (retval != ERROR_OK) {
3000 LOG_DEBUG("Examine %s failed", "CTYPR");
3001 return retval;
3002 }
3003
3004 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
3005 armv7a->debug_base + CPUDBG_TTYPR, &ttypr);
3006 if (retval != ERROR_OK) {
3007 LOG_DEBUG("Examine %s failed", "TTYPR");
3008 return retval;
3009 }
3010
3011 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
3012 armv7a->debug_base + CPUDBG_DIDR, &didr);
3013 if (retval != ERROR_OK) {
3014 LOG_DEBUG("Examine %s failed", "DIDR");
3015 return retval;
3016 }
3017
3018 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
3019 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
3020 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
3021 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
3022
3023 cortex_a->cpuid = cpuid;
3024 cortex_a->ctypr = ctypr;
3025 cortex_a->ttypr = ttypr;
3026 cortex_a->didr = didr;
3027
3028 /* Unlocking the debug registers */
3029 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
3030 CORTEX_A15_PARTNUM) {
3031
3032 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
3033 armv7a->debug_base + CPUDBG_OSLAR,
3034 0);
3035
3036 if (retval != ERROR_OK)
3037 return retval;
3038
3039 }
3040 /* Unlocking the debug registers */
3041 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
3042 CORTEX_A7_PARTNUM) {
3043
3044 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
3045 armv7a->debug_base + CPUDBG_OSLAR,
3046 0);
3047
3048 if (retval != ERROR_OK)
3049 return retval;
3050
3051 }
3052 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
3053 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
3054
3055 if (retval != ERROR_OK)
3056 return retval;
3057
3058 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
3059
3060 armv7a->arm.core_type = ARM_MODE_MON;
3061
3062 /* Avoid recreating the registers cache */
3063 if (!target_was_examined(target)) {
3064 retval = cortex_a_dpm_setup(cortex_a, didr);
3065 if (retval != ERROR_OK)
3066 return retval;
3067 }
3068
3069 /* Setup Breakpoint Register Pairs */
3070 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3071 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3072 cortex_a->brp_num_available = cortex_a->brp_num;
3073 free(cortex_a->brp_list);
3074 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3075 /* cortex_a->brb_enabled = ????; */
3076 for (i = 0; i < cortex_a->brp_num; i++) {
3077 cortex_a->brp_list[i].used = 0;
3078 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3079 cortex_a->brp_list[i].type = BRP_NORMAL;
3080 else
3081 cortex_a->brp_list[i].type = BRP_CONTEXT;
3082 cortex_a->brp_list[i].value = 0;
3083 cortex_a->brp_list[i].control = 0;
3084 cortex_a->brp_list[i].BRPn = i;
3085 }
3086
3087 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3088
3089 target_set_examined(target);
3090 return ERROR_OK;
3091 }
3092
3093 static int cortex_a_examine(struct target *target)
3094 {
3095 int retval = ERROR_OK;
3096
3097 /* Reestablish communication after target reset */
3098 retval = cortex_a_examine_first(target);
3099
3100 /* Configure core debug access */
3101 if (retval == ERROR_OK)
3102 retval = cortex_a_init_debug_access(target);
3103
3104 return retval;
3105 }
3106
3107 /*
3108 * Cortex-A target creation and initialization
3109 */
3110
3111 static int cortex_a_init_target(struct command_context *cmd_ctx,
3112 struct target *target)
3113 {
3114 /* examine_first() does a bunch of this */
3115 return ERROR_OK;
3116 }
3117
3118 static int cortex_a_init_arch_info(struct target *target,
3119 struct cortex_a_common *cortex_a, struct jtag_tap *tap)
3120 {
3121 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3122
3123 /* Setup struct cortex_a_common */
3124 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3125
3126 /* tap has no dap initialized */
3127 if (!tap->dap) {
3128 tap->dap = dap_init();
3129
3130 /* Leave (only) generic DAP stuff for debugport_init() */
3131 tap->dap->tap = tap;
3132 }
3133
3134 tap->dap->ap[dap_ap_get_select(tap->dap)].memaccess_tck = 80;
3135 armv7a->arm.dap = tap->dap;
3136
3137 cortex_a->fast_reg_read = 0;
3138
3139 /* register arch-specific functions */
3140 armv7a->examine_debug_reason = NULL;
3141
3142 armv7a->post_debug_entry = cortex_a_post_debug_entry;
3143
3144 armv7a->pre_restore_context = NULL;
3145
3146 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3147
3148
3149 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
3150
3151 /* REVISIT v7a setup should be in a v7a-specific routine */
3152 armv7a_init_arch_info(target, armv7a);
3153 target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
3154
3155 return ERROR_OK;
3156 }
3157
3158 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3159 {
3160 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3161
3162 cortex_a->armv7a_common.is_armv7r = false;
3163
3164 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3165 }
3166
3167 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3168 {
3169 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3170
3171 cortex_a->armv7a_common.is_armv7r = true;
3172
3173 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3174 }
3175
3176 static void cortex_a_deinit_target(struct target *target)
3177 {
3178 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3179 struct arm_dpm *dpm = &cortex_a->armv7a_common.dpm;
3180
3181 free(cortex_a->brp_list);
3182 free(dpm->dbp);
3183 free(dpm->dwp);
3184 free(cortex_a);
3185 }
3186
3187 static int cortex_a_mmu(struct target *target, int *enabled)
3188 {
3189 struct armv7a_common *armv7a = target_to_armv7a(target);
3190
3191 if (target->state != TARGET_HALTED) {
3192 LOG_ERROR("%s: target not halted", __func__);
3193 return ERROR_TARGET_INVALID;
3194 }
3195
3196 if (armv7a->is_armv7r)
3197 *enabled = 0;
3198 else
3199 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3200
3201 return ERROR_OK;
3202 }
3203
3204 static int cortex_a_virt2phys(struct target *target,
3205 uint32_t virt, uint32_t *phys)
3206 {
3207 int retval = ERROR_FAIL;
3208 struct armv7a_common *armv7a = target_to_armv7a(target);
3209 struct adiv5_dap *swjdp = armv7a->arm.dap;
3210 uint8_t apsel = swjdp->apsel;
3211 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
3212 uint32_t ret;
3213 retval = armv7a_mmu_translate_va(target,
3214 virt, &ret);
3215 if (retval != ERROR_OK)
3216 goto done;
3217 *phys = ret;
3218 } else {/* use this method if armv7a->memory_ap not selected
3219 * mmu must be enable in order to get a correct translation */
3220 retval = cortex_a_mmu_modify(target, 1);
3221 if (retval != ERROR_OK)
3222 goto done;
3223 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
3224 }
3225 done:
3226 return retval;
3227 }
3228
3229 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3230 {
3231 struct target *target = get_current_target(CMD_CTX);
3232 struct armv7a_common *armv7a = target_to_armv7a(target);
3233
3234 return armv7a_handle_cache_info_command(CMD_CTX,
3235 &armv7a->armv7a_mmu.armv7a_cache);
3236 }
3237
3238
3239 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3240 {
3241 struct target *target = get_current_target(CMD_CTX);
3242 if (!target_was_examined(target)) {
3243 LOG_ERROR("target not examined yet");
3244 return ERROR_FAIL;
3245 }
3246
3247 return cortex_a_init_debug_access(target);
3248 }
3249 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
3250 {
3251 struct target *target = get_current_target(CMD_CTX);
3252 /* check target is an smp target */
3253 struct target_list *head;
3254 struct target *curr;
3255 head = target->head;
3256 target->smp = 0;
3257 if (head != (struct target_list *)NULL) {
3258 while (head != (struct target_list *)NULL) {
3259 curr = head->target;
3260 curr->smp = 0;
3261 head = head->next;
3262 }
3263 /* fixes the target display to the debugger */
3264 target->gdb_service->target = target;
3265 }
3266 return ERROR_OK;
3267 }
3268
3269 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
3270 {
3271 struct target *target = get_current_target(CMD_CTX);
3272 struct target_list *head;
3273 struct target *curr;
3274 head = target->head;
3275 if (head != (struct target_list *)NULL) {
3276 target->smp = 1;
3277 while (head != (struct target_list *)NULL) {
3278 curr = head->target;
3279 curr->smp = 1;
3280 head = head->next;
3281 }
3282 }
3283 return ERROR_OK;
3284 }
3285
3286 COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
3287 {
3288 struct target *target = get_current_target(CMD_CTX);
3289 int retval = ERROR_OK;
3290 struct target_list *head;
3291 head = target->head;
3292 if (head != (struct target_list *)NULL) {
3293 if (CMD_ARGC == 1) {
3294 int coreid = 0;
3295 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
3296 if (ERROR_OK != retval)
3297 return retval;
3298 target->gdb_service->core[1] = coreid;
3299
3300 }
3301 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
3302 , target->gdb_service->core[1]);
3303 }
3304 return ERROR_OK;
3305 }
3306
3307 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3308 {
3309 struct target *target = get_current_target(CMD_CTX);
3310 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3311
3312 static const Jim_Nvp nvp_maskisr_modes[] = {
3313 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3314 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3315 { .name = NULL, .value = -1 },
3316 };
3317 const Jim_Nvp *n;
3318
3319 if (target->state != TARGET_HALTED) {
3320 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3321 return ERROR_OK;
3322 }
3323
3324 if (CMD_ARGC > 0) {
3325 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3326 if (n->name == NULL)
3327 return ERROR_COMMAND_SYNTAX_ERROR;
3328 cortex_a->isrmasking_mode = n->value;
3329
3330 }
3331
3332 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3333 command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
3334
3335 return ERROR_OK;
3336 }
3337
3338 static const struct command_registration cortex_a_exec_command_handlers[] = {
3339 {
3340 .name = "cache_info",
3341 .handler = cortex_a_handle_cache_info_command,
3342 .mode = COMMAND_EXEC,
3343 .help = "display information about target caches",
3344 .usage = "",
3345 },
3346 {
3347 .name = "dbginit",
3348 .handler = cortex_a_handle_dbginit_command,
3349 .mode = COMMAND_EXEC,
3350 .help = "Initialize core debug",
3351 .usage = "",
3352 },
3353 { .name = "smp_off",
3354 .handler = cortex_a_handle_smp_off_command,
3355 .mode = COMMAND_EXEC,
3356 .help = "Stop smp handling",
3357 .usage = "",},
3358 {
3359 .name = "smp_on",
3360 .handler = cortex_a_handle_smp_on_command,
3361 .mode = COMMAND_EXEC,
3362 .help = "Restart smp handling",
3363 .usage = "",
3364 },
3365 {
3366 .name = "smp_gdb",
3367 .handler = cortex_a_handle_smp_gdb_command,
3368 .mode = COMMAND_EXEC,
3369 .help = "display/fix current core played to gdb",
3370 .usage = "",
3371 },
3372 {
3373 .name = "maskisr",
3374 .handler = handle_cortex_a_mask_interrupts_command,
3375 .mode = COMMAND_EXEC,
3376 .help = "mask cortex_a interrupts",
3377 .usage = "['on'|'off']",
3378 },
3379
3380
3381 COMMAND_REGISTRATION_DONE
3382 };
/* Top-level command tree for Cortex-A: chains in the generic ARM and
 * ARMv7-A command groups, plus the "cortex_a" subcommands above. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		/* generic ARM commands (registers, core state, ...) */
		.chain = arm_command_handlers,
	},
	{
		/* ARMv7-A specific commands (caches, MMU, ...) */
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3399
/* Target driver for ARMv7-A (Cortex-A) cores.  Memory access goes through
 * the cortex_a_* AP-based helpers; virtual/physical translation and MMU
 * queries are provided, unlike the Cortex-R4 driver below. */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	/* bulk transfers with automatic access-size selection */
	.read_buffer = cortex_a_read_buffer,
	.write_buffer = cortex_a_write_buffer,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* watchpoints not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,

	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
3446
/* Commands available under "cortex_r4"; a subset of the Cortex-A set
 * (no smp commands), reusing the same handler functions. */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_r4 interrupts",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command tree for Cortex-R4: chains in the generic ARM and
 * ARMv7-A command groups plus the "cortex_r4" subcommands above. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		/* generic ARM commands (registers, core state, ...) */
		.chain = arm_command_handlers,
	},
	{
		/* shared with ARMv7-A; applicable parts also serve ARMv7-R */
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3488
/* Target driver for ARMv7-R (Cortex-R4) cores.  Shares most handlers
 * with the Cortex-A driver; omits buffer, phys-memory and MMU hooks
 * (ARMv7-R has an MPU, not an MMU). */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* watchpoints not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)