cortex_a: Add Cortex-A5 identification
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program; if not, write to the *
38 * Free Software Foundation, Inc., *
39 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
40 * *
41 * Cortex-A8(tm) TRM, ARM DDI 0344H *
42 * Cortex-A9(tm) TRM, ARM DDI 0407F *
43  *   Cortex-R4(tm) TRM, ARM DDI 0363E                                     *
44 * Cortex-A15(tm)TRM, ARM DDI 0438C *
45 * *
46 ***************************************************************************/
47
48 #ifdef HAVE_CONFIG_H
49 #include "config.h"
50 #endif
51
52 #include "breakpoints.h"
53 #include "cortex_a.h"
54 #include "register.h"
55 #include "target_request.h"
56 #include "target_type.h"
57 #include "arm_opcodes.h"
58 #include <helper/time_support.h>
59
60 static int cortex_a_poll(struct target *target);
61 static int cortex_a_debug_entry(struct target *target);
62 static int cortex_a_restore_context(struct target *target, bool bpwp);
63 static int cortex_a_set_breakpoint(struct target *target,
64 struct breakpoint *breakpoint, uint8_t matchmode);
65 static int cortex_a_set_context_breakpoint(struct target *target,
66 struct breakpoint *breakpoint, uint8_t matchmode);
67 static int cortex_a_set_hybrid_breakpoint(struct target *target,
68 struct breakpoint *breakpoint);
69 static int cortex_a_unset_breakpoint(struct target *target,
70 struct breakpoint *breakpoint);
71 static int cortex_a_dap_read_coreregister_u32(struct target *target,
72 uint32_t *value, int regnum);
73 static int cortex_a_dap_write_coreregister_u32(struct target *target,
74 uint32_t value, int regnum);
75 static int cortex_a_mmu(struct target *target, int *enabled);
76 static int cortex_a_virt2phys(struct target *target,
77 uint32_t virt, uint32_t *phys);
78 static int cortex_a_read_apb_ab_memory(struct target *target,
79 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
80
81
82 /* restore cp15_control_reg at resume */
83 static int cortex_a_restore_cp15_control_reg(struct target *target)
84 {
85 int retval = ERROR_OK;
86 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
87 struct armv7a_common *armv7a = target_to_armv7a(target);
88
89 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
90 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
91 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
92 retval = armv7a->arm.mcr(target, 15,
93 0, 0, /* op1, op2 */
94 1, 0, /* CRn, CRm */
95 cortex_a->cp15_control_reg);
96 }
97 return retval;
98 }
99
100 /* check address before cortex_a_apb read write access with mmu on
101 * remove apb predictible data abort */
102 static int cortex_a_check_address(struct target *target, uint32_t address)
103 {
104 struct armv7a_common *armv7a = target_to_armv7a(target);
105 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
106 uint32_t os_border = armv7a->armv7a_mmu.os_border;
107 if ((address < os_border) &&
108 (armv7a->arm.core_mode == ARM_MODE_SVC)) {
109 LOG_ERROR("%" PRIx32 " access in userspace and target in supervisor", address);
110 return ERROR_FAIL;
111 }
112 if ((address >= os_border) &&
113 (cortex_a->curr_mode != ARM_MODE_SVC)) {
114 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
115 cortex_a->curr_mode = ARM_MODE_SVC;
116 LOG_INFO("%" PRIx32 " access in kernel space and target not in supervisor",
117 address);
118 return ERROR_OK;
119 }
120 if ((address < os_border) &&
121 (cortex_a->curr_mode == ARM_MODE_SVC)) {
122 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
123 cortex_a->curr_mode = ARM_MODE_ANY;
124 }
125 return ERROR_OK;
126 }
127 /* modify cp15_control_reg in order to enable or disable mmu for :
128 * - virt2phys address conversion
129 * - read or write memory in phys or virt address */
130 static int cortex_a_mmu_modify(struct target *target, int enable)
131 {
132 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
133 struct armv7a_common *armv7a = target_to_armv7a(target);
134 int retval = ERROR_OK;
135 if (enable) {
136 /* if mmu enabled at target stop and mmu not enable */
137 if (!(cortex_a->cp15_control_reg & 0x1U)) {
138 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
139 return ERROR_FAIL;
140 }
141 if (!(cortex_a->cp15_control_reg_curr & 0x1U)) {
142 cortex_a->cp15_control_reg_curr |= 0x1U;
143 retval = armv7a->arm.mcr(target, 15,
144 0, 0, /* op1, op2 */
145 1, 0, /* CRn, CRm */
146 cortex_a->cp15_control_reg_curr);
147 }
148 } else {
149 if (cortex_a->cp15_control_reg_curr & 0x4U) {
150 /* data cache is active */
151 cortex_a->cp15_control_reg_curr &= ~0x4U;
152 /* flush data cache armv7 function to be called */
153 if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache)
154 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache(target);
155 }
156 if ((cortex_a->cp15_control_reg_curr & 0x1U)) {
157 cortex_a->cp15_control_reg_curr &= ~0x1U;
158 retval = armv7a->arm.mcr(target, 15,
159 0, 0, /* op1, op2 */
160 1, 0, /* CRn, CRm */
161 cortex_a->cp15_control_reg_curr);
162 }
163 }
164 return retval;
165 }
166
167 /*
168 * Cortex-A Basic debug access, very low level assumes state is saved
169 */
170 static int cortex_a8_init_debug_access(struct target *target)
171 {
172 struct armv7a_common *armv7a = target_to_armv7a(target);
173 struct adiv5_dap *swjdp = armv7a->arm.dap;
174 int retval;
175
176 LOG_DEBUG(" ");
177
178 /* Unlocking the debug registers for modification
179 * The debugport might be uninitialised so try twice */
180 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
181 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
182 if (retval != ERROR_OK) {
183 /* try again */
184 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
185 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
186 if (retval == ERROR_OK)
187 LOG_USER(
188 "Locking debug access failed on first, but succeeded on second try.");
189 }
190
191 return retval;
192 }
193
194 /*
195 * Cortex-A Basic debug access, very low level assumes state is saved
196 */
197 static int cortex_a_init_debug_access(struct target *target)
198 {
199 struct armv7a_common *armv7a = target_to_armv7a(target);
200 struct adiv5_dap *swjdp = armv7a->arm.dap;
201 int retval;
202 uint32_t dbg_osreg;
203 uint32_t cortex_part_num;
204 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
205
206 LOG_DEBUG(" ");
207 cortex_part_num = (cortex_a->cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >>
208 CORTEX_A_MIDR_PARTNUM_SHIFT;
209
210 switch (cortex_part_num) {
211 case CORTEX_A7_PARTNUM:
212 case CORTEX_A15_PARTNUM:
213 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
214 armv7a->debug_base + CPUDBG_OSLSR,
215 &dbg_osreg);
216 if (retval != ERROR_OK)
217 return retval;
218
219 LOG_DEBUG("DBGOSLSR 0x%" PRIx32, dbg_osreg);
220
221 if (dbg_osreg & CPUDBG_OSLAR_LK_MASK)
222 /* Unlocking the DEBUG OS registers for modification */
223 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
224 armv7a->debug_base + CPUDBG_OSLAR,
225 0);
226 break;
227
228 case CORTEX_A5_PARTNUM:
229 case CORTEX_A8_PARTNUM:
230 case CORTEX_A9_PARTNUM:
231 default:
232 retval = cortex_a8_init_debug_access(target);
233 }
234
235 if (retval != ERROR_OK)
236 return retval;
237 /* Clear Sticky Power Down status Bit in PRSR to enable access to
238 the registers in the Core Power Domain */
239 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
240 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
241 LOG_DEBUG("target->coreid %d DBGPRSR 0x%x ", target->coreid, dbg_osreg);
242
243 if (retval != ERROR_OK)
244 return retval;
245
246 /* Enabling of instruction execution in debug mode is done in debug_entry code */
247
248 /* Resync breakpoint registers */
249
250 /* Since this is likely called from init or reset, update target state information*/
251 return cortex_a_poll(target);
252 }
253
254 /* To reduce needless round-trips, pass in a pointer to the current
255 * DSCR value. Initialize it to zero if you just need to know the
256 * value on return from this function; or DSCR_INSTR_COMP if you
257 * happen to know that no instruction is pending.
258 */
/* Execute one ARM instruction through the Instruction Transfer Register (ITR).
 *
 * Waits for DSCR.InstrCompl, writes the opcode to ITR, then waits for
 * DSCR.InstrCompl again so the invariant "InstrCompl set on return" holds.
 * Both waits time out after ~1 second.
 *
 * dscr_p: in/out cached DSCR value to avoid needless round-trips; pass NULL
 * (or 0) to force a fresh read, or DSCR_INSTR_COMP if no instruction is
 * known to be pending.  Updated with the last DSCR read on success.
 */
static int cortex_a_exec_opcode(struct target *target,
	uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/* 0 forces the first loop iteration to read the real DSCR */
	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	long long then = timeval_ms();
	while ((dscr & DSCR_INSTR_COMP) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	}

	/* writing ITR triggers execution of the opcode by the halted core */
	retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	then = timeval_ms();
	do {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	} while ((dscr & DSCR_INSTR_COMP) == 0);	/* Wait for InstrCompl bit to be set */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
310
311 /**************************************************************************
312 Read core register with very few exec_opcode, fast but needs work_area.
313 This can cause problems with MMU active.
314 **************************************************************************/
315 static int cortex_a_read_regs_through_mem(struct target *target, uint32_t address,
316 uint32_t *regfile)
317 {
318 int retval = ERROR_OK;
319 struct armv7a_common *armv7a = target_to_armv7a(target);
320 struct adiv5_dap *swjdp = armv7a->arm.dap;
321
322 retval = cortex_a_dap_read_coreregister_u32(target, regfile, 0);
323 if (retval != ERROR_OK)
324 return retval;
325 retval = cortex_a_dap_write_coreregister_u32(target, address, 0);
326 if (retval != ERROR_OK)
327 return retval;
328 retval = cortex_a_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
329 if (retval != ERROR_OK)
330 return retval;
331
332 retval = mem_ap_sel_read_buf(swjdp, armv7a->memory_ap,
333 (uint8_t *)(&regfile[1]), 4, 15, address);
334
335 return retval;
336 }
337
338 static int cortex_a_dap_read_coreregister_u32(struct target *target,
339 uint32_t *value, int regnum)
340 {
341 int retval = ERROR_OK;
342 uint8_t reg = regnum&0xFF;
343 uint32_t dscr = 0;
344 struct armv7a_common *armv7a = target_to_armv7a(target);
345 struct adiv5_dap *swjdp = armv7a->arm.dap;
346
347 if (reg > 17)
348 return retval;
349
350 if (reg < 15) {
351 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
352 retval = cortex_a_exec_opcode(target,
353 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
354 &dscr);
355 if (retval != ERROR_OK)
356 return retval;
357 } else if (reg == 15) {
358 /* "MOV r0, r15"; then move r0 to DCCTX */
359 retval = cortex_a_exec_opcode(target, 0xE1A0000F, &dscr);
360 if (retval != ERROR_OK)
361 return retval;
362 retval = cortex_a_exec_opcode(target,
363 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
364 &dscr);
365 if (retval != ERROR_OK)
366 return retval;
367 } else {
368 /* "MRS r0, CPSR" or "MRS r0, SPSR"
369 * then move r0 to DCCTX
370 */
371 retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
372 if (retval != ERROR_OK)
373 return retval;
374 retval = cortex_a_exec_opcode(target,
375 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
376 &dscr);
377 if (retval != ERROR_OK)
378 return retval;
379 }
380
381 /* Wait for DTRRXfull then read DTRRTX */
382 long long then = timeval_ms();
383 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
384 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
385 armv7a->debug_base + CPUDBG_DSCR, &dscr);
386 if (retval != ERROR_OK)
387 return retval;
388 if (timeval_ms() > then + 1000) {
389 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
390 return ERROR_FAIL;
391 }
392 }
393
394 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
395 armv7a->debug_base + CPUDBG_DTRTX, value);
396 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
397
398 return retval;
399 }
400
/* Write one core register through the DCC channel.
 *
 * regnum 0..14 selects Rn directly; 15 writes the PC and 16/17 write
 * CPSR/SPSR -- those three paths bounce the value through r0, so r0 is
 * clobbered for regnum >= 15.  regnum > 17 returns ERROR_OK without
 * writing anything (after draining a stale DCCRX word, if any).
 *
 * The value is pushed into DBGDTRRX over the debug AP, then pulled into
 * the register with an MRC from p14 executed on the halted core.
 */
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full; a stale word would make the core
	 * consume the wrong value below */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode  0xEE100E15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* NOTE(review): invalid register numbers are silently accepted here
	 * (returns ERROR_OK) -- callers appear to rely on this being benign */
	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15) {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	} else if (Rd == 15) {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16) {
			retval = cortex_a_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
479
480 /* Write to memory mapped registers directly with no cache or mmu handling */
481 static int cortex_a_dap_write_memap_register_u32(struct target *target,
482 uint32_t address,
483 uint32_t value)
484 {
485 int retval;
486 struct armv7a_common *armv7a = target_to_armv7a(target);
487 struct adiv5_dap *swjdp = armv7a->arm.dap;
488
489 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, address, value);
490
491 return retval;
492 }
493
494 /*
495 * Cortex-A implementation of Debug Programmer's Model
496 *
497 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
498 * so there's no need to poll for it before executing an instruction.
499 *
500 * NOTE that in several of these cases the "stall" mode might be useful.
501 * It'd let us queue a few operations together... prepare/finish might
502 * be the places to enable/disable that mode.
503 */
504
/* Map an arm_dpm pointer back to its enclosing cortex_a_common instance. */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
509
510 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
511 {
512 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
513 return mem_ap_sel_write_u32(a->armv7a_common.arm.dap,
514 a->armv7a_common.debug_ap, a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
515 }
516
/* Pull one word from the target's DCC transmit register (DBGDTRTX).
 *
 * Waits (up to ~1 s) for DSCR.DTRTXfull before reading, so the core's
 * MCR-to-p14 has actually landed.  dscr_p optionally carries a cached
 * DSCR in and the last observed DSCR out, saving round-trips.
 */
static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
	uint32_t *dscr_p)
{
	struct adiv5_dap *swjdp = a->armv7a_common.arm.dap;
	/* default assumes no cached DSCR: forces at least one fresh read
	 * below unless the caller supplied one */
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
552
/* DPM prepare hook: establish the invariant that DSCR.InstrCompl is set
 * before any DPM instruction sequence runs, and drain a stale DCCRX word
 * if one is pending (which "should never happen"). */
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	struct adiv5_dap *swjdp = a->armv7a_common.arm.dap;
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by consuming the stale word into r0 */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
590
/* DPM finish hook: no per-transaction teardown needed on Cortex-A. */
static int cortex_a_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
596
597 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
598 uint32_t opcode, uint32_t data)
599 {
600 struct cortex_a_common *a = dpm_to_a(dpm);
601 int retval;
602 uint32_t dscr = DSCR_INSTR_COMP;
603
604 retval = cortex_a_write_dcc(a, data);
605 if (retval != ERROR_OK)
606 return retval;
607
608 return cortex_a_exec_opcode(
609 a->armv7a_common.arm.target,
610 opcode,
611 &dscr);
612 }
613
614 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
615 uint32_t opcode, uint32_t data)
616 {
617 struct cortex_a_common *a = dpm_to_a(dpm);
618 uint32_t dscr = DSCR_INSTR_COMP;
619 int retval;
620
621 retval = cortex_a_write_dcc(a, data);
622 if (retval != ERROR_OK)
623 return retval;
624
625 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
626 retval = cortex_a_exec_opcode(
627 a->armv7a_common.arm.target,
628 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
629 &dscr);
630 if (retval != ERROR_OK)
631 return retval;
632
633 /* then the opcode, taking data from R0 */
634 retval = cortex_a_exec_opcode(
635 a->armv7a_common.arm.target,
636 opcode,
637 &dscr);
638
639 return retval;
640 }
641
642 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
643 {
644 struct target *target = dpm->arm->target;
645 uint32_t dscr = DSCR_INSTR_COMP;
646
647 /* "Prefetch flush" after modifying execution status in CPSR */
648 return cortex_a_exec_opcode(target,
649 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
650 &dscr);
651 }
652
653 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
654 uint32_t opcode, uint32_t *data)
655 {
656 struct cortex_a_common *a = dpm_to_a(dpm);
657 int retval;
658 uint32_t dscr = DSCR_INSTR_COMP;
659
660 /* the opcode, writing data to DCC */
661 retval = cortex_a_exec_opcode(
662 a->armv7a_common.arm.target,
663 opcode,
664 &dscr);
665 if (retval != ERROR_OK)
666 return retval;
667
668 return cortex_a_read_dcc(a, data, &dscr);
669 }
670
671
672 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
673 uint32_t opcode, uint32_t *data)
674 {
675 struct cortex_a_common *a = dpm_to_a(dpm);
676 uint32_t dscr = DSCR_INSTR_COMP;
677 int retval;
678
679 /* the opcode, writing data to R0 */
680 retval = cortex_a_exec_opcode(
681 a->armv7a_common.arm.target,
682 opcode,
683 &dscr);
684 if (retval != ERROR_OK)
685 return retval;
686
687 /* write R0 to DCC */
688 retval = cortex_a_exec_opcode(
689 a->armv7a_common.arm.target,
690 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
691 &dscr);
692 if (retval != ERROR_OK)
693 return retval;
694
695 return cortex_a_read_dcc(a, data, &dscr);
696 }
697
698 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
699 uint32_t addr, uint32_t control)
700 {
701 struct cortex_a_common *a = dpm_to_a(dpm);
702 uint32_t vr = a->armv7a_common.debug_base;
703 uint32_t cr = a->armv7a_common.debug_base;
704 int retval;
705
706 switch (index_t) {
707 case 0 ... 15: /* breakpoints */
708 vr += CPUDBG_BVR_BASE;
709 cr += CPUDBG_BCR_BASE;
710 break;
711 case 16 ... 31: /* watchpoints */
712 vr += CPUDBG_WVR_BASE;
713 cr += CPUDBG_WCR_BASE;
714 index_t -= 16;
715 break;
716 default:
717 return ERROR_FAIL;
718 }
719 vr += 4 * index_t;
720 cr += 4 * index_t;
721
722 LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
723 (unsigned) vr, (unsigned) cr);
724
725 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
726 vr, addr);
727 if (retval != ERROR_OK)
728 return retval;
729 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
730 cr, control);
731 return retval;
732 }
733
734 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
735 {
736 struct cortex_a_common *a = dpm_to_a(dpm);
737 uint32_t cr;
738
739 switch (index_t) {
740 case 0 ... 15:
741 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
742 break;
743 case 16 ... 31:
744 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
745 index_t -= 16;
746 break;
747 default:
748 return ERROR_FAIL;
749 }
750 cr += 4 * index_t;
751
752 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
753
754 /* clear control register */
755 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
756 }
757
758 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
759 {
760 struct arm_dpm *dpm = &a->armv7a_common.dpm;
761 int retval;
762
763 dpm->arm = &a->armv7a_common.arm;
764 dpm->didr = didr;
765
766 dpm->prepare = cortex_a_dpm_prepare;
767 dpm->finish = cortex_a_dpm_finish;
768
769 dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
770 dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
771 dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
772
773 dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
774 dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
775
776 dpm->bpwp_enable = cortex_a_bpwp_enable;
777 dpm->bpwp_disable = cortex_a_bpwp_disable;
778
779 retval = arm_dpm_setup(dpm);
780 if (retval == ERROR_OK)
781 retval = arm_dpm_initialize(dpm);
782
783 return retval;
784 }
785 static struct target *get_cortex_a(struct target *target, int32_t coreid)
786 {
787 struct target_list *head;
788 struct target *curr;
789
790 head = target->head;
791 while (head != (struct target_list *)NULL) {
792 curr = head->target;
793 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
794 return curr;
795 head = head->next;
796 }
797 return target;
798 }
799 static int cortex_a_halt(struct target *target);
800
801 static int cortex_a_halt_smp(struct target *target)
802 {
803 int retval = 0;
804 struct target_list *head;
805 struct target *curr;
806 head = target->head;
807 while (head != (struct target_list *)NULL) {
808 curr = head->target;
809 if ((curr != target) && (curr->state != TARGET_HALTED))
810 retval += cortex_a_halt(curr);
811 head = head->next;
812 }
813 return retval;
814 }
815
816 static int update_halt_gdb(struct target *target)
817 {
818 int retval = 0;
819 if (target->gdb_service && target->gdb_service->core[0] == -1) {
820 target->gdb_service->target = target;
821 target->gdb_service->core[0] = target->coreid;
822 retval += cortex_a_halt_smp(target);
823 }
824 return retval;
825 }
826
827 /*
828 * Cortex-A Run control
829 */
830
/* Poll the target: read DSCR, derive the target state, and on a new halt
 * run debug entry, propagate the halt across the SMP group, and notify
 * event callbacks.  Also services gdb core-switch requests in SMP mode. */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR so debug_entry can inspect the halt reason */
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				/* debug-running halts report a different event type */
				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
901
/* Request a halt: write DRCR.HALT, enable halting debug mode in DSCR,
 * then wait (up to ~1 s) for DSCR to report the core halted. */
static int cortex_a_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enter halting debug mode
	 */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* read-modify-write: only the HALT_DBG_MODE bit is added */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_HALTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
949
/* Prepare the core for resume without actually restarting it.
 *
 * current != 0: resume at the current PC (written back via *address);
 * current == 0: resume at *address.  Fixes up the PC for the core state
 * (word-aligned for ARM, bit 0 set for Thumb), restores the saved CP15
 * control register and the full register context, marks the target
 * running, and invalidates the register cache.  The actual restart is
 * done by cortex_a_internal_restart(). */
static int cortex_a_internal_restore(struct target *target, int current,
	uint32_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
1043
/*
 * Take the core out of debug state: disable the ITR, request a restart
 * with sticky exception flags cleared (DRCR), then poll DSCR until the
 * core reports it has restarted, with a 1 second timeout.
 *
 * Note: this only restarts the core; PC/context must already have been
 * written back (see cortex_a_internal_restore()).
 */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct adiv5_dap *swjdp = arm->dap;
	int retval;
	uint32_t dscr;
	/*
	 * Restart core and wait for it to be started.  Clear ITRen and sticky
	 * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* The last queued ITR instruction should have completed by now;
	 * warn (but continue) if it has not. */
	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* Disable ITR execution before leaving debug state. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	/* Request restart and clear any sticky exception flags. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Poll DSCR until the core acknowledges the restart (1 s timeout). */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1100
1101 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
1102 {
1103 int retval = 0;
1104 struct target_list *head;
1105 struct target *curr;
1106 uint32_t address;
1107 head = target->head;
1108 while (head != (struct target_list *)NULL) {
1109 curr = head->target;
1110 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1111 /* resume current address , not in step mode */
1112 retval += cortex_a_internal_restore(curr, 1, &address,
1113 handle_breakpoints, 0);
1114 retval += cortex_a_internal_restart(curr);
1115 }
1116 head = head->next;
1117
1118 }
1119 return retval;
1120 }
1121
1122 static int cortex_a_resume(struct target *target, int current,
1123 uint32_t address, int handle_breakpoints, int debug_execution)
1124 {
1125 int retval = 0;
1126 /* dummy resume for smp toggle in order to reduce gdb impact */
1127 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1128 /* simulate a start and halt of target */
1129 target->gdb_service->target = NULL;
1130 target->gdb_service->core[0] = target->gdb_service->core[1];
1131 /* fake resume at next poll we play the target core[1], see poll*/
1132 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1133 return 0;
1134 }
1135 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1136 if (target->smp) {
1137 target->gdb_service->core[0] = -1;
1138 retval = cortex_a_restore_smp(target, handle_breakpoints);
1139 if (retval != ERROR_OK)
1140 return retval;
1141 }
1142 cortex_a_internal_restart(target);
1143
1144 if (!debug_execution) {
1145 target->state = TARGET_RUNNING;
1146 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1147 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1148 } else {
1149 target->state = TARGET_DEBUG_RUNNING;
1150 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1151 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1152 }
1153
1154 return ERROR_OK;
1155 }
1156
/*
 * Handle entry into debug state: re-enable the ITR, report the halt
 * reason from the cached DSCR, capture WFAR for watchpoint halts, read
 * the core registers (normally via the DPM; through target memory when
 * fast_reg_read is set), and finally run the optional per-core
 * post_debug_entry() hook.
 */
static int cortex_a_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason (sets target->debug_reason from DSCR bits) */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	else {
		/* Fast path: dump R0..PC to a working area, then read the
		 * whole block back through the memory AP. */
		retval = cortex_a_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
			return retval;

		/* read Current PSR */
		retval = cortex_a_dap_read_coreregister_u32(target, &cpsr, 16);
		/* store current cpsr */
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(arm, cpsr);

		/* update cache: mark R0..PC valid and clean */
		for (i = 0; i <= ARM_PC; i++) {
			reg = arm_reg_current(arm, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: the PC read through memory points
		 * beyond the halted instruction; rewind 4 in Thumb/ThumbEE
		 * state, 8 in ARM state, to get the resume address. */
		if (cpsr & (1 << 5)) {
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		} else {
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = arm->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1277
1278 static int cortex_a_post_debug_entry(struct target *target)
1279 {
1280 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1281 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1282 int retval;
1283
1284 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1285 retval = armv7a->arm.mrc(target, 15,
1286 0, 0, /* op1, op2 */
1287 1, 0, /* CRn, CRm */
1288 &cortex_a->cp15_control_reg);
1289 if (retval != ERROR_OK)
1290 return retval;
1291 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1292 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1293
1294 if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1)
1295 armv7a_identify_cache(target);
1296
1297 if (armv7a->is_armv7r) {
1298 armv7a->armv7a_mmu.mmu_enabled = 0;
1299 } else {
1300 armv7a->armv7a_mmu.mmu_enabled =
1301 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1302 }
1303 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1304 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1305 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1306 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1307 cortex_a->curr_mode = armv7a->arm.core_mode;
1308
1309 return ERROR_OK;
1310 }
1311
1312 static int cortex_a_step(struct target *target, int current, uint32_t address,
1313 int handle_breakpoints)
1314 {
1315 struct armv7a_common *armv7a = target_to_armv7a(target);
1316 struct arm *arm = &armv7a->arm;
1317 struct breakpoint *breakpoint = NULL;
1318 struct breakpoint stepbreakpoint;
1319 struct reg *r;
1320 int retval;
1321
1322 if (target->state != TARGET_HALTED) {
1323 LOG_WARNING("target not halted");
1324 return ERROR_TARGET_NOT_HALTED;
1325 }
1326
1327 /* current = 1: continue on current pc, otherwise continue at <address> */
1328 r = arm->pc;
1329 if (!current)
1330 buf_set_u32(r->value, 0, 32, address);
1331 else
1332 address = buf_get_u32(r->value, 0, 32);
1333
1334 /* The front-end may request us not to handle breakpoints.
1335 * But since Cortex-A uses breakpoint for single step,
1336 * we MUST handle breakpoints.
1337 */
1338 handle_breakpoints = 1;
1339 if (handle_breakpoints) {
1340 breakpoint = breakpoint_find(target, address);
1341 if (breakpoint)
1342 cortex_a_unset_breakpoint(target, breakpoint);
1343 }
1344
1345 /* Setup single step breakpoint */
1346 stepbreakpoint.address = address;
1347 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1348 ? 2 : 4;
1349 stepbreakpoint.type = BKPT_HARD;
1350 stepbreakpoint.set = 0;
1351
1352 /* Break on IVA mismatch */
1353 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1354
1355 target->debug_reason = DBG_REASON_SINGLESTEP;
1356
1357 retval = cortex_a_resume(target, 1, address, 0, 0);
1358 if (retval != ERROR_OK)
1359 return retval;
1360
1361 long long then = timeval_ms();
1362 while (target->state != TARGET_HALTED) {
1363 retval = cortex_a_poll(target);
1364 if (retval != ERROR_OK)
1365 return retval;
1366 if (timeval_ms() > then + 1000) {
1367 LOG_ERROR("timeout waiting for target halt");
1368 return ERROR_FAIL;
1369 }
1370 }
1371
1372 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1373
1374 target->debug_reason = DBG_REASON_BREAKPOINT;
1375
1376 if (breakpoint)
1377 cortex_a_set_breakpoint(target, breakpoint, 0);
1378
1379 if (target->state != TARGET_HALTED)
1380 LOG_DEBUG("target stepped");
1381
1382 return ERROR_OK;
1383 }
1384
1385 static int cortex_a_restore_context(struct target *target, bool bpwp)
1386 {
1387 struct armv7a_common *armv7a = target_to_armv7a(target);
1388
1389 LOG_DEBUG(" ");
1390
1391 if (armv7a->pre_restore_context)
1392 armv7a->pre_restore_context(target);
1393
1394 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1395 }
1396
1397 /*
1398 * Cortex-A Breakpoint and watchpoint functions
1399 */
1400
1401 /* Setup hardware Breakpoint Register Pair */
1402 static int cortex_a_set_breakpoint(struct target *target,
1403 struct breakpoint *breakpoint, uint8_t matchmode)
1404 {
1405 int retval;
1406 int brp_i = 0;
1407 uint32_t control;
1408 uint8_t byte_addr_select = 0x0F;
1409 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1410 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1411 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1412
1413 if (breakpoint->set) {
1414 LOG_WARNING("breakpoint already set");
1415 return ERROR_OK;
1416 }
1417
1418 if (breakpoint->type == BKPT_HARD) {
1419 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1420 brp_i++;
1421 if (brp_i >= cortex_a->brp_num) {
1422 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1423 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1424 }
1425 breakpoint->set = brp_i + 1;
1426 if (breakpoint->length == 2)
1427 byte_addr_select = (3 << (breakpoint->address & 0x02));
1428 control = ((matchmode & 0x7) << 20)
1429 | (byte_addr_select << 5)
1430 | (3 << 1) | 1;
1431 brp_list[brp_i].used = 1;
1432 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1433 brp_list[brp_i].control = control;
1434 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1435 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1436 brp_list[brp_i].value);
1437 if (retval != ERROR_OK)
1438 return retval;
1439 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1440 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1441 brp_list[brp_i].control);
1442 if (retval != ERROR_OK)
1443 return retval;
1444 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1445 brp_list[brp_i].control,
1446 brp_list[brp_i].value);
1447 } else if (breakpoint->type == BKPT_SOFT) {
1448 uint8_t code[4];
1449 if (breakpoint->length == 2)
1450 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1451 else
1452 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1453 retval = target_read_memory(target,
1454 breakpoint->address & 0xFFFFFFFE,
1455 breakpoint->length, 1,
1456 breakpoint->orig_instr);
1457 if (retval != ERROR_OK)
1458 return retval;
1459 retval = target_write_memory(target,
1460 breakpoint->address & 0xFFFFFFFE,
1461 breakpoint->length, 1, code);
1462 if (retval != ERROR_OK)
1463 return retval;
1464 breakpoint->set = 0x11; /* Any nice value but 0 */
1465 }
1466
1467 return ERROR_OK;
1468 }
1469
1470 static int cortex_a_set_context_breakpoint(struct target *target,
1471 struct breakpoint *breakpoint, uint8_t matchmode)
1472 {
1473 int retval = ERROR_FAIL;
1474 int brp_i = 0;
1475 uint32_t control;
1476 uint8_t byte_addr_select = 0x0F;
1477 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1478 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1479 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1480
1481 if (breakpoint->set) {
1482 LOG_WARNING("breakpoint already set");
1483 return retval;
1484 }
1485 /*check available context BRPs*/
1486 while ((brp_list[brp_i].used ||
1487 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1488 brp_i++;
1489
1490 if (brp_i >= cortex_a->brp_num) {
1491 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1492 return ERROR_FAIL;
1493 }
1494
1495 breakpoint->set = brp_i + 1;
1496 control = ((matchmode & 0x7) << 20)
1497 | (byte_addr_select << 5)
1498 | (3 << 1) | 1;
1499 brp_list[brp_i].used = 1;
1500 brp_list[brp_i].value = (breakpoint->asid);
1501 brp_list[brp_i].control = control;
1502 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1503 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1504 brp_list[brp_i].value);
1505 if (retval != ERROR_OK)
1506 return retval;
1507 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1508 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1509 brp_list[brp_i].control);
1510 if (retval != ERROR_OK)
1511 return retval;
1512 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1513 brp_list[brp_i].control,
1514 brp_list[brp_i].value);
1515 return ERROR_OK;
1516
1517 }
1518
1519 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1520 {
1521 int retval = ERROR_FAIL;
1522 int brp_1 = 0; /* holds the contextID pair */
1523 int brp_2 = 0; /* holds the IVA pair */
1524 uint32_t control_CTX, control_IVA;
1525 uint8_t CTX_byte_addr_select = 0x0F;
1526 uint8_t IVA_byte_addr_select = 0x0F;
1527 uint8_t CTX_machmode = 0x03;
1528 uint8_t IVA_machmode = 0x01;
1529 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1530 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1531 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1532
1533 if (breakpoint->set) {
1534 LOG_WARNING("breakpoint already set");
1535 return retval;
1536 }
1537 /*check available context BRPs*/
1538 while ((brp_list[brp_1].used ||
1539 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1540 brp_1++;
1541
1542 printf("brp(CTX) found num: %d\n", brp_1);
1543 if (brp_1 >= cortex_a->brp_num) {
1544 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1545 return ERROR_FAIL;
1546 }
1547
1548 while ((brp_list[brp_2].used ||
1549 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1550 brp_2++;
1551
1552 printf("brp(IVA) found num: %d\n", brp_2);
1553 if (brp_2 >= cortex_a->brp_num) {
1554 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1555 return ERROR_FAIL;
1556 }
1557
1558 breakpoint->set = brp_1 + 1;
1559 breakpoint->linked_BRP = brp_2;
1560 control_CTX = ((CTX_machmode & 0x7) << 20)
1561 | (brp_2 << 16)
1562 | (0 << 14)
1563 | (CTX_byte_addr_select << 5)
1564 | (3 << 1) | 1;
1565 brp_list[brp_1].used = 1;
1566 brp_list[brp_1].value = (breakpoint->asid);
1567 brp_list[brp_1].control = control_CTX;
1568 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1569 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1570 brp_list[brp_1].value);
1571 if (retval != ERROR_OK)
1572 return retval;
1573 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1574 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1575 brp_list[brp_1].control);
1576 if (retval != ERROR_OK)
1577 return retval;
1578
1579 control_IVA = ((IVA_machmode & 0x7) << 20)
1580 | (brp_1 << 16)
1581 | (IVA_byte_addr_select << 5)
1582 | (3 << 1) | 1;
1583 brp_list[brp_2].used = 1;
1584 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1585 brp_list[brp_2].control = control_IVA;
1586 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1587 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1588 brp_list[brp_2].value);
1589 if (retval != ERROR_OK)
1590 return retval;
1591 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1592 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1593 brp_list[brp_2].control);
1594 if (retval != ERROR_OK)
1595 return retval;
1596
1597 return ERROR_OK;
1598 }
1599
/*
 * Disarm a breakpoint previously armed by one of the cortex_a_set_*
 * functions.  Hardware breakpoints release their BRP slot(s) -- hybrid
 * breakpoints, recognized here by having both a nonzero address and a
 * nonzero ASID, release the linked pair -- by zeroing both BCR and BVR.
 * Software breakpoints restore the saved original instruction.
 */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* Hybrid breakpoint: both an IVA and a context BRP to release. */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			/* set stores BRP index + 1 (0 means "not set") */
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* Clear BCR first (disables the BRP), then BVR. */
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* Plain hardware breakpoint: single BRP to release. */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1702
1703 static int cortex_a_add_breakpoint(struct target *target,
1704 struct breakpoint *breakpoint)
1705 {
1706 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1707
1708 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1709 LOG_INFO("no hardware breakpoint available");
1710 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1711 }
1712
1713 if (breakpoint->type == BKPT_HARD)
1714 cortex_a->brp_num_available--;
1715
1716 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1717 }
1718
1719 static int cortex_a_add_context_breakpoint(struct target *target,
1720 struct breakpoint *breakpoint)
1721 {
1722 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1723
1724 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1725 LOG_INFO("no hardware breakpoint available");
1726 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1727 }
1728
1729 if (breakpoint->type == BKPT_HARD)
1730 cortex_a->brp_num_available--;
1731
1732 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1733 }
1734
1735 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1736 struct breakpoint *breakpoint)
1737 {
1738 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1739
1740 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1741 LOG_INFO("no hardware breakpoint available");
1742 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1743 }
1744
1745 if (breakpoint->type == BKPT_HARD)
1746 cortex_a->brp_num_available--;
1747
1748 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1749 }
1750
1751
1752 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1753 {
1754 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1755
1756 #if 0
1757 /* It is perfectly possible to remove breakpoints while the target is running */
1758 if (target->state != TARGET_HALTED) {
1759 LOG_WARNING("target not halted");
1760 return ERROR_TARGET_NOT_HALTED;
1761 }
1762 #endif
1763
1764 if (breakpoint->set) {
1765 cortex_a_unset_breakpoint(target, breakpoint);
1766 if (breakpoint->type == BKPT_HARD)
1767 cortex_a->brp_num_available++;
1768 }
1769
1770
1771 return ERROR_OK;
1772 }
1773
1774 /*
1775 * Cortex-A Reset functions
1776 */
1777
1778 static int cortex_a_assert_reset(struct target *target)
1779 {
1780 struct armv7a_common *armv7a = target_to_armv7a(target);
1781
1782 LOG_DEBUG(" ");
1783
1784 /* FIXME when halt is requested, make it work somehow... */
1785
1786 /* Issue some kind of warm reset. */
1787 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1788 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1789 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1790 /* REVISIT handle "pulls" cases, if there's
1791 * hardware that needs them to work.
1792 */
1793 jtag_add_reset(0, 1);
1794 } else {
1795 LOG_ERROR("%s: how to reset?", target_name(target));
1796 return ERROR_FAIL;
1797 }
1798
1799 /* registers are now invalid */
1800 register_cache_invalidate(armv7a->arm.core_cache);
1801
1802 target->state = TARGET_RESET;
1803
1804 return ERROR_OK;
1805 }
1806
1807 static int cortex_a_deassert_reset(struct target *target)
1808 {
1809 int retval;
1810
1811 LOG_DEBUG(" ");
1812
1813 /* be certain SRST is off */
1814 jtag_add_reset(0, 0);
1815
1816 retval = cortex_a_poll(target);
1817 if (retval != ERROR_OK)
1818 return retval;
1819
1820 if (target->reset_halt) {
1821 if (target->state != TARGET_HALTED) {
1822 LOG_WARNING("%s: ran after reset and before halt ...",
1823 target_name(target));
1824 retval = target_halt(target);
1825 if (retval != ERROR_OK)
1826 return retval;
1827 }
1828 }
1829
1830 return ERROR_OK;
1831 }
1832
1833 static int cortex_a_write_apb_ab_memory(struct target *target,
1834 uint32_t address, uint32_t size,
1835 uint32_t count, const uint8_t *buffer)
1836 {
1837 /* write memory through APB-AP */
1838
1839 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1840 struct armv7a_common *armv7a = target_to_armv7a(target);
1841 struct arm *arm = &armv7a->arm;
1842 struct adiv5_dap *swjdp = armv7a->arm.dap;
1843 int total_bytes = count * size;
1844 int total_u32;
1845 int start_byte = address & 0x3;
1846 int end_byte = (address + total_bytes) & 0x3;
1847 struct reg *reg;
1848 uint32_t dscr;
1849 uint8_t *tmp_buff = NULL;
1850
1851
1852 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count%" PRIu32,
1853 address, size, count);
1854 if (target->state != TARGET_HALTED) {
1855 LOG_WARNING("target not halted");
1856 return ERROR_TARGET_NOT_HALTED;
1857 }
1858
1859 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1860
1861 /* Mark register R0 as dirty, as it will be used
1862 * for transferring the data.
1863 * It will be restored automatically when exiting
1864 * debug mode
1865 */
1866 reg = arm_reg_current(arm, 0);
1867 reg->dirty = true;
1868
1869 /* clear any abort */
1870 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1871 if (retval != ERROR_OK)
1872 return retval;
1873
1874 /* This algorithm comes from either :
1875 * Cortex-A TRM Example 12-25
1876 * Cortex-R4 TRM Example 11-26
1877 * (slight differences)
1878 */
1879
1880 /* The algorithm only copies 32 bit words, so the buffer
1881 * should be expanded to include the words at either end.
1882 * The first and last words will be read first to avoid
1883 * corruption if needed.
1884 */
1885 tmp_buff = malloc(total_u32 * 4);
1886
1887 if ((start_byte != 0) && (total_u32 > 1)) {
1888 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1889 * the other bytes in the word.
1890 */
1891 retval = cortex_a_read_apb_ab_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1892 if (retval != ERROR_OK)
1893 goto error_free_buff_w;
1894 }
1895
1896 /* If end of write is not aligned, or the write is less than 4 bytes */
1897 if ((end_byte != 0) ||
1898 ((total_u32 == 1) && (total_bytes != 4))) {
1899 /* Read the last word to avoid corruption during 32 bit write */
1900 int mem_offset = (total_u32-1) * 4;
1901 retval = cortex_a_read_apb_ab_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1902 if (retval != ERROR_OK)
1903 goto error_free_buff_w;
1904 }
1905
1906 /* Copy the write buffer over the top of the temporary buffer */
1907 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1908
1909 /* We now have a 32 bit aligned buffer that can be written */
1910
1911 /* Read DSCR */
1912 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1913 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1914 if (retval != ERROR_OK)
1915 goto error_free_buff_w;
1916
1917 /* Set DTR mode to Fast (2) */
1918 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_FAST_MODE;
1919 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1920 armv7a->debug_base + CPUDBG_DSCR, dscr);
1921 if (retval != ERROR_OK)
1922 goto error_free_buff_w;
1923
1924 /* Copy the destination address into R0 */
1925 /* - pend an instruction MRC p14, 0, R0, c5, c0 */
1926 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1927 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
1928 if (retval != ERROR_OK)
1929 goto error_unset_dtr_w;
1930 /* Write address into DTRRX, which triggers previous instruction */
1931 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1932 armv7a->debug_base + CPUDBG_DTRRX, address & (~0x3));
1933 if (retval != ERROR_OK)
1934 goto error_unset_dtr_w;
1935
1936 /* Write the data transfer instruction into the ITR
1937 * (STC p14, c5, [R0], 4)
1938 */
1939 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1940 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
1941 if (retval != ERROR_OK)
1942 goto error_unset_dtr_w;
1943
1944 /* Do the write */
1945 retval = mem_ap_sel_write_buf_noincr(swjdp, armv7a->debug_ap,
1946 tmp_buff, 4, total_u32, armv7a->debug_base + CPUDBG_DTRRX);
1947 if (retval != ERROR_OK)
1948 goto error_unset_dtr_w;
1949
1950
1951 /* Switch DTR mode back to non-blocking (0) */
1952 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1953 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1954 armv7a->debug_base + CPUDBG_DSCR, dscr);
1955 if (retval != ERROR_OK)
1956 goto error_unset_dtr_w;
1957
1958 /* Check for sticky abort flags in the DSCR */
1959 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1960 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1961 if (retval != ERROR_OK)
1962 goto error_free_buff_w;
1963 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
1964 /* Abort occurred - clear it and exit */
1965 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1966 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1967 armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1968 goto error_free_buff_w;
1969 }
1970
1971 /* Done */
1972 free(tmp_buff);
1973 return ERROR_OK;
1974
1975 error_unset_dtr_w:
1976 /* Unset DTR mode */
1977 mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1978 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1979 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1980 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1981 armv7a->debug_base + CPUDBG_DSCR, dscr);
1982 error_free_buff_w:
1983 LOG_ERROR("error");
1984 free(tmp_buff);
1985 return ERROR_FAIL;
1986 }
1987
1988 static int cortex_a_read_apb_ab_memory(struct target *target,
1989 uint32_t address, uint32_t size,
1990 uint32_t count, uint8_t *buffer)
1991 {
1992 /* read memory through APB-AP */
1993
1994 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1995 struct armv7a_common *armv7a = target_to_armv7a(target);
1996 struct adiv5_dap *swjdp = armv7a->arm.dap;
1997 struct arm *arm = &armv7a->arm;
1998 int total_bytes = count * size;
1999 int total_u32;
2000 int start_byte = address & 0x3;
2001 int end_byte = (address + total_bytes) & 0x3;
2002 struct reg *reg;
2003 uint32_t dscr;
2004 uint8_t *tmp_buff = NULL;
2005 uint8_t buf[8];
2006 uint8_t *u8buf_ptr;
2007
2008 LOG_DEBUG("Reading APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count%" PRIu32,
2009 address, size, count);
2010 if (target->state != TARGET_HALTED) {
2011 LOG_WARNING("target not halted");
2012 return ERROR_TARGET_NOT_HALTED;
2013 }
2014
2015 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
2016 /* Mark register R0 as dirty, as it will be used
2017 * for transferring the data.
2018 * It will be restored automatically when exiting
2019 * debug mode
2020 */
2021 reg = arm_reg_current(arm, 0);
2022 reg->dirty = true;
2023
2024 /* clear any abort */
2025 retval =
2026 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, armv7a->debug_base + CPUDBG_DRCR, 1<<2);
2027 if (retval != ERROR_OK)
2028 goto error_free_buff_r;
2029
2030 /* Read DSCR */
2031 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2032 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2033
2034 /* This algorithm comes from either :
2035 * Cortex-A TRM Example 12-24
2036 * Cortex-R4 TRM Example 11-25
2037 * (slight differences)
2038 */
2039
2040 /* Set DTR access mode to stall mode b01 */
2041 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_STALL_MODE;
2042 retval += mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2043 armv7a->debug_base + CPUDBG_DSCR, dscr);
2044
2045 /* Write R0 with value 'address' using write procedure for stall mode */
2046 /* - Write the address for read access into DTRRX */
2047 retval += mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2048 armv7a->debug_base + CPUDBG_DTRRX, address & ~0x3);
2049 /* - Copy value from DTRRX to R0 using instruction mrc p14, 0, r0, c5, c0 */
2050 cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2051
2052 /* Write the data transfer instruction (ldc p14, c5, [r0],4)
2053 * and the DTR mode setting to fast mode
2054 * in one combined write (since they are adjacent registers)
2055 */
2056 u8buf_ptr = buf;
2057 target_buffer_set_u32(target, u8buf_ptr, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
2058 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_FAST_MODE;
2059 target_buffer_set_u32(target, u8buf_ptr + 4, dscr);
2060 /* group the 2 access CPUDBG_ITR 0x84 and CPUDBG_DSCR 0x88 */
2061 retval += mem_ap_sel_write_buf(swjdp, armv7a->debug_ap, u8buf_ptr, 4, 2,
2062 armv7a->debug_base + CPUDBG_ITR);
2063 if (retval != ERROR_OK)
2064 goto error_unset_dtr_r;
2065
2066 /* Optimize the read as much as we can, either way we read in a single pass */
2067 if ((start_byte) || (end_byte)) {
2068 /* The algorithm only copies 32 bit words, so the buffer
2069 * should be expanded to include the words at either end.
2070 * The first and last words will be read into a temp buffer
2071 * to avoid corruption
2072 */
2073 tmp_buff = malloc(total_u32 * 4);
2074 if (!tmp_buff)
2075 goto error_unset_dtr_r;
2076
2077 /* use the tmp buffer to read the entire data */
2078 u8buf_ptr = tmp_buff;
2079 } else
2080 /* address and read length are aligned so read directely into the passed buffer */
2081 u8buf_ptr = buffer;
2082
2083 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
2084 * Abort flags are sticky, so can be read at end of transactions
2085 *
2086 * This data is read in aligned to 32 bit boundary.
2087 */
2088 retval = mem_ap_sel_read_buf_noincr(swjdp, armv7a->debug_ap, u8buf_ptr, 4, total_u32,
2089 armv7a->debug_base + CPUDBG_DTRTX);
2090 if (retval != ERROR_OK)
2091 goto error_unset_dtr_r;
2092
2093 /* set DTR access mode back to non blocking b00 */
2094 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
2095 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2096 armv7a->debug_base + CPUDBG_DSCR, dscr);
2097 if (retval != ERROR_OK)
2098 goto error_free_buff_r;
2099
2100 /* Wait for the final read instruction to finish */
2101 do {
2102 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2103 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2104 if (retval != ERROR_OK)
2105 goto error_free_buff_r;
2106 } while ((dscr & DSCR_INSTR_COMP) == 0);
2107
2108 /* Check for sticky abort flags in the DSCR */
2109 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2110 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2111 if (retval != ERROR_OK)
2112 goto error_free_buff_r;
2113 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2114 /* Abort occurred - clear it and exit */
2115 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2116 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2117 armv7a->debug_base + CPUDBG_DRCR, 1<<2);
2118 goto error_free_buff_r;
2119 }
2120
2121 /* check if we need to copy aligned data by applying any shift necessary */
2122 if (tmp_buff) {
2123 memcpy(buffer, tmp_buff + start_byte, total_bytes);
2124 free(tmp_buff);
2125 }
2126
2127 /* Done */
2128 return ERROR_OK;
2129
2130 error_unset_dtr_r:
2131 /* Unset DTR mode */
2132 mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2133 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2134 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
2135 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2136 armv7a->debug_base + CPUDBG_DSCR, dscr);
2137 error_free_buff_r:
2138 LOG_ERROR("error");
2139 free(tmp_buff);
2140 return ERROR_FAIL;
2141 }
2142
2143
/*
 * Cortex-A Memory access
 *
 * This is the same as for the Cortex-M3, but we must also use the
 * correct AP number for every access.
 */
2150
2151 static int cortex_a_read_phys_memory(struct target *target,
2152 uint32_t address, uint32_t size,
2153 uint32_t count, uint8_t *buffer)
2154 {
2155 struct armv7a_common *armv7a = target_to_armv7a(target);
2156 struct adiv5_dap *swjdp = armv7a->arm.dap;
2157 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2158 uint8_t apsel = swjdp->apsel;
2159 LOG_DEBUG("Reading memory at real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32,
2160 address, size, count);
2161
2162 if (count && buffer) {
2163
2164 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2165
2166 /* read memory through AHB-AP */
2167 retval = mem_ap_sel_read_buf(swjdp, armv7a->memory_ap, buffer, size, count, address);
2168 } else {
2169
2170 /* read memory through APB-AP */
2171 if (!armv7a->is_armv7r) {
2172 /* disable mmu */
2173 retval = cortex_a_mmu_modify(target, 0);
2174 if (retval != ERROR_OK)
2175 return retval;
2176 }
2177 retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
2178 }
2179 }
2180 return retval;
2181 }
2182
2183 static int cortex_a_read_memory(struct target *target, uint32_t address,
2184 uint32_t size, uint32_t count, uint8_t *buffer)
2185 {
2186 int mmu_enabled = 0;
2187 uint32_t virt, phys;
2188 int retval;
2189 struct armv7a_common *armv7a = target_to_armv7a(target);
2190 struct adiv5_dap *swjdp = armv7a->arm.dap;
2191 uint8_t apsel = swjdp->apsel;
2192
2193 /* cortex_a handles unaligned memory access */
2194 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2195 size, count);
2196
2197 /* determine if MMU was enabled on target stop */
2198 if (!armv7a->is_armv7r) {
2199 retval = cortex_a_mmu(target, &mmu_enabled);
2200 if (retval != ERROR_OK)
2201 return retval;
2202 }
2203
2204 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2205 if (mmu_enabled) {
2206 virt = address;
2207 retval = cortex_a_virt2phys(target, virt, &phys);
2208 if (retval != ERROR_OK)
2209 return retval;
2210
2211 LOG_DEBUG("Reading at virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2212 virt, phys);
2213 address = phys;
2214 }
2215 retval = cortex_a_read_phys_memory(target, address, size,
2216 count, buffer);
2217 } else {
2218 if (mmu_enabled) {
2219 retval = cortex_a_check_address(target, address);
2220 if (retval != ERROR_OK)
2221 return retval;
2222 /* enable MMU as we could have disabled it for phys access */
2223 retval = cortex_a_mmu_modify(target, 1);
2224 if (retval != ERROR_OK)
2225 return retval;
2226 }
2227 retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
2228 }
2229 return retval;
2230 }
2231
/* Write memory at a physical address.  Uses the AHB-AP when available
 * and selected; otherwise writes through the APB-AP with the MMU
 * disabled (ARMv7-A only).  After a successful AHB-AP write while
 * halted, the I- and D-caches are invalidated across the written range
 * so the core observes the new contents.
 */
static int cortex_a_write_phys_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	uint8_t apsel = swjdp->apsel;

	LOG_DEBUG("Writing memory to real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
		size, count);

	if (count && buffer) {

		if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {

			/* write memory through AHB-AP */
			retval = mem_ap_sel_write_buf(swjdp, armv7a->memory_ap, buffer, size, count, address);
		} else {

			/* write memory through APB-AP */
			if (!armv7a->is_armv7r) {
				/* disable mmu */
				retval = cortex_a_mmu_modify(target, 0);
				if (retval != ERROR_OK)
					return retval;
			}
			/* NOTE(review): this early return skips the cache
			 * invalidation below; presumably the APB-AP (CPU-
			 * mediated) path keeps the caches coherent itself --
			 * confirm */
			return cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
		}
	}


	/* REVISIT this op is generic ARMv7-A/R stuff */
	if (retval == ERROR_OK && target->state == TARGET_HALTED) {
		struct arm_dpm *dpm = armv7a->arm.dpm;

		retval = dpm->prepare(dpm);
		if (retval != ERROR_OK)
			return retval;

		/* The Cache handling will NOT work with MMU active, the
		 * wrong addresses will be invalidated!
		 *
		 * For both ICache and DCache, walk all cache lines in the
		 * address range. Cortex-A has fixed 64 byte line length.
		 *
		 * REVISIT per ARMv7, these may trigger watchpoints ...
		 */

		/* invalidate I-Cache */
		if (armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled) {
			/* ICIMVAU - Invalidate Cache single entry
			 * with MVA to PoU
			 *      MCR p15, 0, r0, c7, c5, 1
			 */
			for (uint32_t cacheline = 0;
				cacheline < size * count;
				cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
						address + cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* invalidate D-Cache */
		if (armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled) {
			/* DCIMVAC - Invalidate data Cache line
			 * with MVA to PoC
			 *      MCR p15, 0, r0, c7, c6, 1
			 */
			for (uint32_t cacheline = 0;
				cacheline < size * count;
				cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
						address + cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* (void) */ dpm->finish(dpm);
	}

	return retval;
}
2319
2320 static int cortex_a_write_memory(struct target *target, uint32_t address,
2321 uint32_t size, uint32_t count, const uint8_t *buffer)
2322 {
2323 int mmu_enabled = 0;
2324 uint32_t virt, phys;
2325 int retval;
2326 struct armv7a_common *armv7a = target_to_armv7a(target);
2327 struct adiv5_dap *swjdp = armv7a->arm.dap;
2328 uint8_t apsel = swjdp->apsel;
2329
2330 /* cortex_a handles unaligned memory access */
2331 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2332 size, count);
2333
2334 /* determine if MMU was enabled on target stop */
2335 if (!armv7a->is_armv7r) {
2336 retval = cortex_a_mmu(target, &mmu_enabled);
2337 if (retval != ERROR_OK)
2338 return retval;
2339 }
2340
2341 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2342 LOG_DEBUG("Writing memory to address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address, size,
2343 count);
2344 if (mmu_enabled) {
2345 virt = address;
2346 retval = cortex_a_virt2phys(target, virt, &phys);
2347 if (retval != ERROR_OK)
2348 return retval;
2349
2350 LOG_DEBUG("Writing to virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2351 virt,
2352 phys);
2353 address = phys;
2354 }
2355 retval = cortex_a_write_phys_memory(target, address, size,
2356 count, buffer);
2357 } else {
2358 if (mmu_enabled) {
2359 retval = cortex_a_check_address(target, address);
2360 if (retval != ERROR_OK)
2361 return retval;
2362 /* enable MMU as we could have disabled it for phys access */
2363 retval = cortex_a_mmu_modify(target, 1);
2364 if (retval != ERROR_OK)
2365 return retval;
2366 }
2367 retval = cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
2368 }
2369 return retval;
2370 }
2371
2372 static int cortex_a_handle_target_request(void *priv)
2373 {
2374 struct target *target = priv;
2375 struct armv7a_common *armv7a = target_to_armv7a(target);
2376 struct adiv5_dap *swjdp = armv7a->arm.dap;
2377 int retval;
2378
2379 if (!target_was_examined(target))
2380 return ERROR_OK;
2381 if (!target->dbg_msg_enabled)
2382 return ERROR_OK;
2383
2384 if (target->state == TARGET_RUNNING) {
2385 uint32_t request;
2386 uint32_t dscr;
2387 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2388 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2389
2390 /* check if we have data */
2391 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2392 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2393 armv7a->debug_base + CPUDBG_DTRTX, &request);
2394 if (retval == ERROR_OK) {
2395 target_request(target, request);
2396 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2397 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2398 }
2399 }
2400 }
2401
2402 return ERROR_OK;
2403 }
2404
2405 /*
2406 * Cortex-A target information and configuration
2407 */
2408
2409 static int cortex_a_examine_first(struct target *target)
2410 {
2411 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2412 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2413 struct adiv5_dap *swjdp = armv7a->arm.dap;
2414 int i;
2415 int retval = ERROR_OK;
2416 uint32_t didr, ctypr, ttypr, cpuid, dbg_osreg;
2417
2418 /* We do one extra read to ensure DAP is configured,
2419 * we call ahbap_debugport_init(swjdp) instead
2420 */
2421 retval = ahbap_debugport_init(swjdp);
2422 if (retval != ERROR_OK)
2423 return retval;
2424
2425 /* Search for the APB-AB - it is needed for access to debug registers */
2426 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2427 if (retval != ERROR_OK) {
2428 LOG_ERROR("Could not find APB-AP for debug access");
2429 return retval;
2430 }
2431 /* Search for the AHB-AB */
2432 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
2433 if (retval != ERROR_OK) {
2434 /* AHB-AP not found - use APB-AP */
2435 LOG_DEBUG("Could not find AHB-AP - using APB-AP for memory access");
2436 armv7a->memory_ap_available = false;
2437 } else {
2438 armv7a->memory_ap_available = true;
2439 }
2440
2441
2442 if (!target->dbgbase_set) {
2443 uint32_t dbgbase;
2444 /* Get ROM Table base */
2445 uint32_t apid;
2446 int32_t coreidx = target->coreid;
2447 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2448 target->cmd_name);
2449 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
2450 if (retval != ERROR_OK)
2451 return retval;
2452 /* Lookup 0x15 -- Processor DAP */
2453 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
2454 &armv7a->debug_base, &coreidx);
2455 if (retval != ERROR_OK)
2456 return retval;
2457 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2458 coreidx, armv7a->debug_base);
2459 } else
2460 armv7a->debug_base = target->dbgbase;
2461
2462 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2463 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2464 if (retval != ERROR_OK)
2465 return retval;
2466
2467 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2468 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2469 if (retval != ERROR_OK) {
2470 LOG_DEBUG("Examine %s failed", "CPUID");
2471 return retval;
2472 }
2473
2474 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2475 armv7a->debug_base + CPUDBG_CTYPR, &ctypr);
2476 if (retval != ERROR_OK) {
2477 LOG_DEBUG("Examine %s failed", "CTYPR");
2478 return retval;
2479 }
2480
2481 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2482 armv7a->debug_base + CPUDBG_TTYPR, &ttypr);
2483 if (retval != ERROR_OK) {
2484 LOG_DEBUG("Examine %s failed", "TTYPR");
2485 return retval;
2486 }
2487
2488 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2489 armv7a->debug_base + CPUDBG_DIDR, &didr);
2490 if (retval != ERROR_OK) {
2491 LOG_DEBUG("Examine %s failed", "DIDR");
2492 return retval;
2493 }
2494
2495 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2496 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2497 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2498 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2499
2500 cortex_a->cpuid = cpuid;
2501 cortex_a->ctypr = ctypr;
2502 cortex_a->ttypr = ttypr;
2503 cortex_a->didr = didr;
2504
2505 /* Unlocking the debug registers */
2506 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
2507 CORTEX_A15_PARTNUM) {
2508
2509 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2510 armv7a->debug_base + CPUDBG_OSLAR,
2511 0);
2512
2513 if (retval != ERROR_OK)
2514 return retval;
2515
2516 }
2517 /* Unlocking the debug registers */
2518 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
2519 CORTEX_A7_PARTNUM) {
2520
2521 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2522 armv7a->debug_base + CPUDBG_OSLAR,
2523 0);
2524
2525 if (retval != ERROR_OK)
2526 return retval;
2527
2528 }
2529 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2530 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2531
2532 if (retval != ERROR_OK)
2533 return retval;
2534
2535 LOG_DEBUG("target->coreid %d DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
2536
2537 armv7a->arm.core_type = ARM_MODE_MON;
2538 retval = cortex_a_dpm_setup(cortex_a, didr);
2539 if (retval != ERROR_OK)
2540 return retval;
2541
2542 /* Setup Breakpoint Register Pairs */
2543 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
2544 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2545 cortex_a->brp_num_available = cortex_a->brp_num;
2546 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
2547 /* cortex_a->brb_enabled = ????; */
2548 for (i = 0; i < cortex_a->brp_num; i++) {
2549 cortex_a->brp_list[i].used = 0;
2550 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
2551 cortex_a->brp_list[i].type = BRP_NORMAL;
2552 else
2553 cortex_a->brp_list[i].type = BRP_CONTEXT;
2554 cortex_a->brp_list[i].value = 0;
2555 cortex_a->brp_list[i].control = 0;
2556 cortex_a->brp_list[i].BRPn = i;
2557 }
2558
2559 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
2560
2561 target_set_examined(target);
2562 return ERROR_OK;
2563 }
2564
2565 static int cortex_a_examine(struct target *target)
2566 {
2567 int retval = ERROR_OK;
2568
2569 /* don't re-probe hardware after each reset */
2570 if (!target_was_examined(target))
2571 retval = cortex_a_examine_first(target);
2572
2573 /* Configure core debug access */
2574 if (retval == ERROR_OK)
2575 retval = cortex_a_init_debug_access(target);
2576
2577 return retval;
2578 }
2579
2580 /*
2581 * Cortex-A target creation and initialization
2582 */
2583
2584 static int cortex_a_init_target(struct command_context *cmd_ctx,
2585 struct target *target)
2586 {
2587 /* examine_first() does a bunch of this */
2588 return ERROR_OK;
2589 }
2590
2591 static int cortex_a_init_arch_info(struct target *target,
2592 struct cortex_a_common *cortex_a, struct jtag_tap *tap)
2593 {
2594 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2595 struct adiv5_dap *dap = &armv7a->dap;
2596
2597 armv7a->arm.dap = dap;
2598
2599 /* Setup struct cortex_a_common */
2600 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2601 /* tap has no dap initialized */
2602 if (!tap->dap) {
2603 armv7a->arm.dap = dap;
2604 /* Setup struct cortex_a_common */
2605
2606 /* prepare JTAG information for the new target */
2607 cortex_a->jtag_info.tap = tap;
2608 cortex_a->jtag_info.scann_size = 4;
2609
2610 /* Leave (only) generic DAP stuff for debugport_init() */
2611 dap->jtag_info = &cortex_a->jtag_info;
2612
2613 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
2614 dap->tar_autoincr_block = (1 << 10);
2615 dap->memaccess_tck = 80;
2616 tap->dap = dap;
2617 } else
2618 armv7a->arm.dap = tap->dap;
2619
2620 cortex_a->fast_reg_read = 0;
2621
2622 /* register arch-specific functions */
2623 armv7a->examine_debug_reason = NULL;
2624
2625 armv7a->post_debug_entry = cortex_a_post_debug_entry;
2626
2627 armv7a->pre_restore_context = NULL;
2628
2629 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
2630
2631
2632 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
2633
2634 /* REVISIT v7a setup should be in a v7a-specific routine */
2635 armv7a_init_arch_info(target, armv7a);
2636 target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
2637
2638 return ERROR_OK;
2639 }
2640
2641 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
2642 {
2643 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
2644
2645 cortex_a->armv7a_common.is_armv7r = false;
2646
2647 return cortex_a_init_arch_info(target, cortex_a, target->tap);
2648 }
2649
2650 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
2651 {
2652 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
2653
2654 cortex_a->armv7a_common.is_armv7r = true;
2655
2656 return cortex_a_init_arch_info(target, cortex_a, target->tap);
2657 }
2658
2659
2660 static int cortex_a_mmu(struct target *target, int *enabled)
2661 {
2662 if (target->state != TARGET_HALTED) {
2663 LOG_ERROR("%s: target not halted", __func__);
2664 return ERROR_TARGET_INVALID;
2665 }
2666
2667 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2668 return ERROR_OK;
2669 }
2670
2671 static int cortex_a_virt2phys(struct target *target,
2672 uint32_t virt, uint32_t *phys)
2673 {
2674 int retval = ERROR_FAIL;
2675 struct armv7a_common *armv7a = target_to_armv7a(target);
2676 struct adiv5_dap *swjdp = armv7a->arm.dap;
2677 uint8_t apsel = swjdp->apsel;
2678 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2679 uint32_t ret;
2680 retval = armv7a_mmu_translate_va(target,
2681 virt, &ret);
2682 if (retval != ERROR_OK)
2683 goto done;
2684 *phys = ret;
2685 } else {/* use this method if armv7a->memory_ap not selected
2686 * mmu must be enable in order to get a correct translation */
2687 retval = cortex_a_mmu_modify(target, 1);
2688 if (retval != ERROR_OK)
2689 goto done;
2690 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
2691 }
2692 done:
2693 return retval;
2694 }
2695
/* "cortex_a cache_info" command: print cache information for the
 * current target via the generic ARMv7-A cache-info printer. */
COMMAND_HANDLER(cortex_a_handle_cache_info_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct armv7a_common *armv7a = target_to_armv7a(target);

	return armv7a_handle_cache_info_command(CMD_CTX,
			&armv7a->armv7a_mmu.armv7a_cache);
}
2704
2705
2706 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
2707 {
2708 struct target *target = get_current_target(CMD_CTX);
2709 if (!target_was_examined(target)) {
2710 LOG_ERROR("target not examined yet");
2711 return ERROR_FAIL;
2712 }
2713
2714 return cortex_a_init_debug_access(target);
2715 }
2716 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
2717 {
2718 struct target *target = get_current_target(CMD_CTX);
2719 /* check target is an smp target */
2720 struct target_list *head;
2721 struct target *curr;
2722 head = target->head;
2723 target->smp = 0;
2724 if (head != (struct target_list *)NULL) {
2725 while (head != (struct target_list *)NULL) {
2726 curr = head->target;
2727 curr->smp = 0;
2728 head = head->next;
2729 }
2730 /* fixes the target display to the debugger */
2731 target->gdb_service->target = target;
2732 }
2733 return ERROR_OK;
2734 }
2735
2736 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
2737 {
2738 struct target *target = get_current_target(CMD_CTX);
2739 struct target_list *head;
2740 struct target *curr;
2741 head = target->head;
2742 if (head != (struct target_list *)NULL) {
2743 target->smp = 1;
2744 while (head != (struct target_list *)NULL) {
2745 curr = head->target;
2746 curr->smp = 1;
2747 head = head->next;
2748 }
2749 }
2750 return ERROR_OK;
2751 }
2752
/* "cortex_a smp_gdb" command: with an argument, select which core id
 * the gdb service presents (core[1]); with no argument, just display
 * the current core[0] -> core[1] mapping.  Only meaningful for targets
 * that are part of an SMP group. */
COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
{
	struct target *target = get_current_target(CMD_CTX);
	int retval = ERROR_OK;
	struct target_list *head;
	head = target->head;
	if (head != (struct target_list *)NULL) {
		if (CMD_ARGC == 1) {
			int coreid = 0;
			COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
			/* NOTE(review): this check looks dead -- retval is
			 * never assigned after its ERROR_OK init, and
			 * COMMAND_PARSE_NUMBER presumably returns from the
			 * handler itself on a parse failure; confirm against
			 * the macro definition in command.h */
			if (ERROR_OK != retval)
				return retval;
			target->gdb_service->core[1] = coreid;

		}
		command_print(CMD_CTX, "gdb coreid  %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
			, target->gdb_service->core[1]);
	}
	return ERROR_OK;
}
2773
2774 static const struct command_registration cortex_a_exec_command_handlers[] = {
2775 {
2776 .name = "cache_info",
2777 .handler = cortex_a_handle_cache_info_command,
2778 .mode = COMMAND_EXEC,
2779 .help = "display information about target caches",
2780 .usage = "",
2781 },
2782 {
2783 .name = "dbginit",
2784 .handler = cortex_a_handle_dbginit_command,
2785 .mode = COMMAND_EXEC,
2786 .help = "Initialize core debug",
2787 .usage = "",
2788 },
2789 { .name = "smp_off",
2790 .handler = cortex_a_handle_smp_off_command,
2791 .mode = COMMAND_EXEC,
2792 .help = "Stop smp handling",
2793 .usage = "",},
2794 {
2795 .name = "smp_on",
2796 .handler = cortex_a_handle_smp_on_command,
2797 .mode = COMMAND_EXEC,
2798 .help = "Restart smp handling",
2799 .usage = "",
2800 },
2801 {
2802 .name = "smp_gdb",
2803 .handler = cortex_a_handle_smp_gdb_command,
2804 .mode = COMMAND_EXEC,
2805 .help = "display/fix current core played to gdb",
2806 .usage = "",
2807 },
2808
2809
2810 COMMAND_REGISTRATION_DONE
2811 };
/* Top-level command registration for Cortex-A targets: chains in the
 * generic ARM and ARMv7-A command groups, then adds the "cortex_a"
 * group defined above. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2828
/* Target-type descriptor for Cortex-A (ARMv7-A) cores: wires the
 * generic target API to the cortex_a_* implementations in this file.
 * Exposes both virtual (read/write_memory) and physical
 * (read/write_phys_memory) access plus MMU/virt2phys hooks. */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* hardware watchpoints are not supported by this driver */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,

	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
2871
/* Subcommands of the "cortex_r4" command group (no SMP commands --
 * only cache_info and dbginit are registered for R4). */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for Cortex-R4 targets: chains in the
 * generic ARM and ARMv7-A command groups, then adds the "cortex_r4"
 * group defined above. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2906
/* Target-type descriptor for Cortex-R4 (ARMv7-R) cores.  Shares almost
 * all hooks with the Cortex-A descriptor; note it registers no
 * read/write_phys_memory, mmu, or virt2phys hooks (ARMv7-R has an MPU,
 * not an MMU). */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* hardware watchpoints are not supported by this driver */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)