9b8ba41665fb4a517ec9c8c9854f8d697f20ce81
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * This program is free software; you can redistribute it and/or modify *
21 * it under the terms of the GNU General Public License as published by *
22 * the Free Software Foundation; either version 2 of the License, or *
23 * (at your option) any later version. *
24 * *
25 * This program is distributed in the hope that it will be useful, *
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
28 * GNU General Public License for more details. *
29 * *
30 * You should have received a copy of the GNU General Public License *
31 * along with this program; if not, write to the *
32 * Free Software Foundation, Inc., *
33 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
34 * *
35 * Cortex-A8(tm) TRM, ARM DDI 0344H *
36 * Cortex-A9(tm) TRM, ARM DDI 0407F *
37 * *
38 ***************************************************************************/
39 #ifdef HAVE_CONFIG_H
40 #include "config.h"
41 #endif
42
43 #include "breakpoints.h"
44 #include "cortex_a.h"
45 #include "register.h"
46 #include "target_request.h"
47 #include "target_type.h"
48 #include "arm_opcodes.h"
49 #include <helper/time_support.h>
50
51 static int cortex_a8_poll(struct target *target);
52 static int cortex_a8_debug_entry(struct target *target);
53 static int cortex_a8_restore_context(struct target *target, bool bpwp);
54 static int cortex_a8_set_breakpoint(struct target *target,
55 struct breakpoint *breakpoint, uint8_t matchmode);
56 static int cortex_a8_unset_breakpoint(struct target *target,
57 struct breakpoint *breakpoint);
58 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
59 uint32_t *value, int regnum);
60 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
61 uint32_t value, int regnum);
62 static int cortex_a8_mmu(struct target *target, int *enabled);
63 static int cortex_a8_virt2phys(struct target *target,
64 uint32_t virt, uint32_t *phys);
65 static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
66 int d_u_cache, int i_cache);
67 static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
68 int d_u_cache, int i_cache);
69 static int cortex_a8_get_ttb(struct target *target, uint32_t *result);
70
71
72 /*
73 * FIXME do topology discovery using the ROM; don't
74 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
75 * cores, with different AP numbering ... don't use a #define
76 * for these numbers, use per-core armv7a state.
77 */
78 #define swjdp_memoryap 0
79 #define swjdp_debugap 1
80
81 /*
82 * Cortex-A8 Basic debug access, very low level assumes state is saved
83 */
84 static int cortex_a8_init_debug_access(struct target *target)
85 {
86 struct armv7a_common *armv7a = target_to_armv7a(target);
87 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
88 int retval;
89 uint32_t dummy;
90
91 LOG_DEBUG(" ");
92
93 /* Unlocking the debug registers for modification */
94 /* The debugport might be uninitialised so try twice */
95 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
96 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
97 if (retval != ERROR_OK)
98 {
99 /* try again */
100 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
101 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
102 if (retval == ERROR_OK)
103 {
104 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
105 }
106 }
107 if (retval != ERROR_OK)
108 return retval;
109 /* Clear Sticky Power Down status Bit in PRSR to enable access to
110 the registers in the Core Power Domain */
111 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
112 armv7a->debug_base + CPUDBG_PRSR, &dummy);
113 if (retval != ERROR_OK)
114 return retval;
115
116 /* Enabling of instruction execution in debug mode is done in debug_entry code */
117
118 /* Resync breakpoint registers */
119
120 /* Since this is likely called from init or reset, update target state information*/
121 return cortex_a8_poll(target);
122 }
123
124 /* To reduce needless round-trips, pass in a pointer to the current
125 * DSCR value. Initialize it to zero if you just need to know the
126 * value on return from this function; or DSCR_INSTR_COMP if you
127 * happen to know that no instruction is pending.
128 */
129 static int cortex_a8_exec_opcode(struct target *target,
130 uint32_t opcode, uint32_t *dscr_p)
131 {
132 uint32_t dscr;
133 int retval;
134 struct armv7a_common *armv7a = target_to_armv7a(target);
135 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
136
137 dscr = dscr_p ? *dscr_p : 0;
138
139 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
140
141 /* Wait for InstrCompl bit to be set */
142 long long then = timeval_ms();
143 while ((dscr & DSCR_INSTR_COMP) == 0)
144 {
145 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
146 armv7a->debug_base + CPUDBG_DSCR, &dscr);
147 if (retval != ERROR_OK)
148 {
149 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
150 return retval;
151 }
152 if (timeval_ms() > then + 1000)
153 {
154 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
155 return ERROR_FAIL;
156 }
157 }
158
159 retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
160 armv7a->debug_base + CPUDBG_ITR, opcode);
161 if (retval != ERROR_OK)
162 return retval;
163
164 then = timeval_ms();
165 do
166 {
167 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
168 armv7a->debug_base + CPUDBG_DSCR, &dscr);
169 if (retval != ERROR_OK)
170 {
171 LOG_ERROR("Could not read DSCR register");
172 return retval;
173 }
174 if (timeval_ms() > then + 1000)
175 {
176 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
177 return ERROR_FAIL;
178 }
179 }
180 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
181
182 if (dscr_p)
183 *dscr_p = dscr;
184
185 return retval;
186 }
187
188 /**************************************************************************
189 Read core register with very few exec_opcode, fast but needs work_area.
190 This can cause problems with MMU active.
191 **************************************************************************/
192 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
193 uint32_t * regfile)
194 {
195 int retval = ERROR_OK;
196 struct armv7a_common *armv7a = target_to_armv7a(target);
197 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
198
199 retval = cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
200 if (retval != ERROR_OK)
201 return retval;
202 retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
203 if (retval != ERROR_OK)
204 return retval;
205 retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
206 if (retval != ERROR_OK)
207 return retval;
208
209 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
210 (uint8_t *)(&regfile[1]), 4*15, address);
211
212 return retval;
213 }
214
/**
 * Read one core register through the DCC (Debug Communications Channel).
 *
 * The register content is routed to DBGDTRTX by executing an
 * appropriate instruction through the ITR, then read back over the
 * debug AP once DSCR reports DTRTX full.
 *
 * @param target the Cortex-A8 target (halted, with ITR enabled)
 * @param value  receives the 32-bit register content
 * @param regnum 0..14 = Rn, 15 = PC, 16 = CPSR, 17 = SPSR
 * @return ERROR_OK on success.  NOTE(review): an out-of-range regnum
 *         (> 17) silently returns ERROR_OK without touching *value.
 */
static int cortex_a8_dap_read_coreregister_u32(struct target *target,
        uint32_t *value, int regnum)
{
    int retval = ERROR_OK;
    uint8_t reg = regnum&0xFF;
    uint32_t dscr = 0;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;

    if (reg > 17)
        return retval;

    if (reg < 15)
    {
        /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0"  0xEE00nE15 */
        retval = cortex_a8_exec_opcode(target,
                ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
                &dscr);
        if (retval != ERROR_OK)
            return retval;
    }
    else if (reg == 15)
    {
        /* PC cannot be moved to p14 directly:
         * "MOV r0, r15"; then move r0 to DCCTX */
        retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
        if (retval != ERROR_OK)
            return retval;
        retval = cortex_a8_exec_opcode(target,
                ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
                &dscr);
        if (retval != ERROR_OK)
            return retval;
    }
    else
    {
        /* "MRS r0, CPSR" (reg 16) or "MRS r0, SPSR" (reg 17)
         * then move r0 to DCCTX
         */
        retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
        if (retval != ERROR_OK)
            return retval;
        retval = cortex_a8_exec_opcode(target,
                ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
                &dscr);
        if (retval != ERROR_OK)
            return retval;
    }

    /* Wait for DTRRXfull then read DTRRTX */
    long long then = timeval_ms();
    while ((dscr & DSCR_DTR_TX_FULL) == 0)
    {
        retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
                armv7a->debug_base + CPUDBG_DSCR, &dscr);
        if (retval != ERROR_OK)
            return retval;
        if (timeval_ms() > then + 1000)
        {
            LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
            return ERROR_FAIL;
        }
    }

    retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
            armv7a->debug_base + CPUDBG_DTRTX, value);
    LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

    return retval;
}
284
/**
 * Write one core register through the DCC.
 *
 * The value is pushed into DBGDTRRX over the debug AP, then moved into
 * the destination register by executing an appropriate instruction
 * through the ITR.  A CPSR write is followed by a prefetch flush so the
 * new execution state takes effect.
 *
 * @param target the Cortex-A8 target (halted, with ITR enabled)
 * @param value  32-bit value to load
 * @param regnum 0..14 = Rn, 15 = PC, 16 = CPSR, 17 = SPSR
 * @return ERROR_OK on success.  NOTE(review): an out-of-range regnum
 *         (> 17) silently returns ERROR_OK after draining a stale
 *         DCCRX word, if any.
 */
static int cortex_a8_dap_write_coreregister_u32(struct target *target,
        uint32_t value, int regnum)
{
    int retval = ERROR_OK;
    uint8_t Rd = regnum&0xFF;
    uint32_t dscr;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;

    LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

    /* Check that DCCRX is not full */
    retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
            armv7a->debug_base + CPUDBG_DSCR, &dscr);
    if (retval != ERROR_OK)
        return retval;
    if (dscr & DSCR_DTR_RX_FULL)
    {
        LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
        /* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode  0xEE100E15 */
        retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
                &dscr);
        if (retval != ERROR_OK)
            return retval;
    }

    if (Rd > 17)
        return retval;

    /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
    LOG_DEBUG("write DCC 0x%08" PRIx32, value);
    retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
            armv7a->debug_base + CPUDBG_DTRRX, value);
    if (retval != ERROR_OK)
        return retval;

    if (Rd < 15)
    {
        /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
        retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
                &dscr);

        if (retval != ERROR_OK)
            return retval;
    }
    else if (Rd == 15)
    {
        /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
         * then "mov r15, r0"
         */
        retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
                &dscr);
        if (retval != ERROR_OK)
            return retval;
        retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
        if (retval != ERROR_OK)
            return retval;
    }
    else
    {
        /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
         * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
         */
        retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
                &dscr);
        if (retval != ERROR_OK)
            return retval;
        retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
                &dscr);
        if (retval != ERROR_OK)
            return retval;

        /* "Prefetch flush" after modifying execution status in CPSR */
        if (Rd == 16)
        {
            retval = cortex_a8_exec_opcode(target,
                    ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
                    &dscr);
            if (retval != ERROR_OK)
                return retval;
        }
    }

    return retval;
}
370
371 /* Write to memory mapped registers directly with no cache or mmu handling */
372 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
373 {
374 int retval;
375 struct armv7a_common *armv7a = target_to_armv7a(target);
376 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
377
378 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap, address, value);
379
380 return retval;
381 }
382
383 /*
384 * Cortex-A8 implementation of Debug Programmer's Model
385 *
386 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
387 * so there's no need to poll for it before executing an instruction.
388 *
389 * NOTE that in several of these cases the "stall" mode might be useful.
390 * It'd let us queue a few operations together... prepare/finish might
391 * be the places to enable/disable that mode.
392 */
393
/* Recover the per-core Cortex-A8 state from an embedded DPM handle. */
static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
{
    return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
}
398
399 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
400 {
401 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
402 return mem_ap_sel_write_u32(a8->armv7a_common.armv4_5_common.dap,
403 swjdp_debugap,a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
404 }
405
406 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
407 uint32_t *dscr_p)
408 {
409 struct adiv5_dap *swjdp = a8->armv7a_common.armv4_5_common.dap;
410 uint32_t dscr = DSCR_INSTR_COMP;
411 int retval;
412
413 if (dscr_p)
414 dscr = *dscr_p;
415
416 /* Wait for DTRRXfull */
417 long long then = timeval_ms();
418 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
419 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
420 a8->armv7a_common.debug_base + CPUDBG_DSCR,
421 &dscr);
422 if (retval != ERROR_OK)
423 return retval;
424 if (timeval_ms() > then + 1000)
425 {
426 LOG_ERROR("Timeout waiting for read dcc");
427 return ERROR_FAIL;
428 }
429 }
430
431 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
432 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
433 if (retval != ERROR_OK)
434 return retval;
435 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
436
437 if (dscr_p)
438 *dscr_p = dscr;
439
440 return retval;
441 }
442
443 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
444 {
445 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
446 struct adiv5_dap *swjdp = a8->armv7a_common.armv4_5_common.dap;
447 uint32_t dscr;
448 int retval;
449
450 /* set up invariant: INSTR_COMP is set after ever DPM operation */
451 long long then = timeval_ms();
452 for (;;)
453 {
454 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
455 a8->armv7a_common.debug_base + CPUDBG_DSCR,
456 &dscr);
457 if (retval != ERROR_OK)
458 return retval;
459 if ((dscr & DSCR_INSTR_COMP) != 0)
460 break;
461 if (timeval_ms() > then + 1000)
462 {
463 LOG_ERROR("Timeout waiting for dpm prepare");
464 return ERROR_FAIL;
465 }
466 }
467
468 /* this "should never happen" ... */
469 if (dscr & DSCR_DTR_RX_FULL) {
470 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
471 /* Clear DCCRX */
472 retval = cortex_a8_exec_opcode(
473 a8->armv7a_common.armv4_5_common.target,
474 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
475 &dscr);
476 if (retval != ERROR_OK)
477 return retval;
478 }
479
480 return retval;
481 }
482
483 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
484 {
485 /* REVISIT what could be done here? */
486 return ERROR_OK;
487 }
488
489 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
490 uint32_t opcode, uint32_t data)
491 {
492 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
493 int retval;
494 uint32_t dscr = DSCR_INSTR_COMP;
495
496 retval = cortex_a8_write_dcc(a8, data);
497 if (retval != ERROR_OK)
498 return retval;
499
500 return cortex_a8_exec_opcode(
501 a8->armv7a_common.armv4_5_common.target,
502 opcode,
503 &dscr);
504 }
505
506 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
507 uint32_t opcode, uint32_t data)
508 {
509 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
510 uint32_t dscr = DSCR_INSTR_COMP;
511 int retval;
512
513 retval = cortex_a8_write_dcc(a8, data);
514 if (retval != ERROR_OK)
515 return retval;
516
517 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
518 retval = cortex_a8_exec_opcode(
519 a8->armv7a_common.armv4_5_common.target,
520 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
521 &dscr);
522 if (retval != ERROR_OK)
523 return retval;
524
525 /* then the opcode, taking data from R0 */
526 retval = cortex_a8_exec_opcode(
527 a8->armv7a_common.armv4_5_common.target,
528 opcode,
529 &dscr);
530
531 return retval;
532 }
533
534 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
535 {
536 struct target *target = dpm->arm->target;
537 uint32_t dscr = DSCR_INSTR_COMP;
538
539 /* "Prefetch flush" after modifying execution status in CPSR */
540 return cortex_a8_exec_opcode(target,
541 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
542 &dscr);
543 }
544
545 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
546 uint32_t opcode, uint32_t *data)
547 {
548 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
549 int retval;
550 uint32_t dscr = DSCR_INSTR_COMP;
551
552 /* the opcode, writing data to DCC */
553 retval = cortex_a8_exec_opcode(
554 a8->armv7a_common.armv4_5_common.target,
555 opcode,
556 &dscr);
557 if (retval != ERROR_OK)
558 return retval;
559
560 return cortex_a8_read_dcc(a8, data, &dscr);
561 }
562
563
564 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
565 uint32_t opcode, uint32_t *data)
566 {
567 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
568 uint32_t dscr = DSCR_INSTR_COMP;
569 int retval;
570
571 /* the opcode, writing data to R0 */
572 retval = cortex_a8_exec_opcode(
573 a8->armv7a_common.armv4_5_common.target,
574 opcode,
575 &dscr);
576 if (retval != ERROR_OK)
577 return retval;
578
579 /* write R0 to DCC */
580 retval = cortex_a8_exec_opcode(
581 a8->armv7a_common.armv4_5_common.target,
582 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
583 &dscr);
584 if (retval != ERROR_OK)
585 return retval;
586
587 return cortex_a8_read_dcc(a8, data, &dscr);
588 }
589
590 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
591 uint32_t addr, uint32_t control)
592 {
593 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
594 uint32_t vr = a8->armv7a_common.debug_base;
595 uint32_t cr = a8->armv7a_common.debug_base;
596 int retval;
597
598 switch (index_t) {
599 case 0 ... 15: /* breakpoints */
600 vr += CPUDBG_BVR_BASE;
601 cr += CPUDBG_BCR_BASE;
602 break;
603 case 16 ... 31: /* watchpoints */
604 vr += CPUDBG_WVR_BASE;
605 cr += CPUDBG_WCR_BASE;
606 index_t -= 16;
607 break;
608 default:
609 return ERROR_FAIL;
610 }
611 vr += 4 * index_t;
612 cr += 4 * index_t;
613
614 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
615 (unsigned) vr, (unsigned) cr);
616
617 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
618 vr, addr);
619 if (retval != ERROR_OK)
620 return retval;
621 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
622 cr, control);
623 return retval;
624 }
625
626 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
627 {
628 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
629 uint32_t cr;
630
631 switch (index_t) {
632 case 0 ... 15:
633 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
634 break;
635 case 16 ... 31:
636 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
637 index_t -= 16;
638 break;
639 default:
640 return ERROR_FAIL;
641 }
642 cr += 4 * index_t;
643
644 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
645
646 /* clear control register */
647 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
648 }
649
650 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
651 {
652 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
653 int retval;
654
655 dpm->arm = &a8->armv7a_common.armv4_5_common;
656 dpm->didr = didr;
657
658 dpm->prepare = cortex_a8_dpm_prepare;
659 dpm->finish = cortex_a8_dpm_finish;
660
661 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
662 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
663 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
664
665 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
666 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
667
668 dpm->bpwp_enable = cortex_a8_bpwp_enable;
669 dpm->bpwp_disable = cortex_a8_bpwp_disable;
670
671 retval = arm_dpm_setup(dpm);
672 if (retval == ERROR_OK)
673 retval = arm_dpm_initialize(dpm);
674
675 return retval;
676 }
677 static struct target *get_cortex_a8(struct target *target, int32_t coreid)
678 {
679 struct target_list *head;
680 struct target *curr;
681
682 head = target->head;
683 while(head != (struct target_list*)NULL)
684 {
685 curr = head->target;
686 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
687 {
688 return curr;
689 }
690 head = head->next;
691 }
692 return target;
693 }
694 static int cortex_a8_halt(struct target *target);
695
696 static int cortex_a8_halt_smp(struct target *target)
697 {
698 int retval = 0;
699 struct target_list *head;
700 struct target *curr;
701 head = target->head;
702 while(head != (struct target_list*)NULL)
703 {
704 curr = head->target;
705 if ((curr != target) && (curr->state!= TARGET_HALTED))
706 {
707 retval += cortex_a8_halt(curr);
708 }
709 head = head->next;
710 }
711 return retval;
712 }
713
714 static int update_halt_gdb(struct target *target)
715 {
716 int retval = 0;
717 if (target->gdb_service->core[0]==-1)
718 {
719 target->gdb_service->target = target;
720 target->gdb_service->core[0] = target->coreid;
721 retval += cortex_a8_halt_smp(target);
722 }
723 return retval;
724 }
725
726 /*
727 * Cortex-A8 Run control
728 */
729
/*
 * Poll the core's debug status (DSCR) and update target->state,
 * performing debug entry and firing halt events on a fresh halt.
 * Also services the SMP gdb core-toggle handshake set up by
 * cortex_a8_resume().
 */
static int cortex_a8_poll(struct target *target)
{
    int retval = ERROR_OK;
    uint32_t dscr;
    struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
    struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
    struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
    enum target_state prev_target_state = target->state;
    // toggle to another core is done by gdb as follow
    // maint packet J core_id
    // continue
    // the next polling trigger an halt event sent to gdb
    if ((target->state == TARGET_HALTED) && (target->smp) &&
        (target->gdb_service) &&
        (target->gdb_service->target==NULL) )
    {
        /* Redirect gdb to the halted core it asked for (core[1]) and
         * report the fake halt set up by cortex_a8_resume(). */
        target->gdb_service->target =
            get_cortex_a8(target, target->gdb_service->core[1]);
        target_call_event_callbacks(target,
                TARGET_EVENT_HALTED);
        return retval;
    }
    retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
            armv7a->debug_base + CPUDBG_DSCR, &dscr);
    if (retval != ERROR_OK)
    {
        return retval;
    }
    /* Cache the raw DSCR so debug_entry can examine the halt reason. */
    cortex_a8->cpudbg_dscr = dscr;

    if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED))
    {
        if (prev_target_state != TARGET_HALTED)
        {
            /* We have a halting debug event */
            LOG_DEBUG("Target halted");
            target->state = TARGET_HALTED;
            if ((prev_target_state == TARGET_RUNNING)
                    || (prev_target_state == TARGET_RESET))
            {
                retval = cortex_a8_debug_entry(target);
                if (retval != ERROR_OK)
                    return retval;
                if (target->smp)
                {
                    /* First halt in the group: halt the siblings too. */
                    retval = update_halt_gdb(target);
                    if (retval != ERROR_OK)
                        return retval;
                }
                target_call_event_callbacks(target,
                        TARGET_EVENT_HALTED);
            }
            if (prev_target_state == TARGET_DEBUG_RUNNING)
            {
                LOG_DEBUG(" ");

                retval = cortex_a8_debug_entry(target);
                if (retval != ERROR_OK)
                    return retval;
                if (target->smp)
                {
                    retval = update_halt_gdb(target);
                    if (retval != ERROR_OK)
                        return retval;
                }

                target_call_event_callbacks(target,
                        TARGET_EVENT_DEBUG_HALTED);
            }
        }
    }
    else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
    {
        target->state = TARGET_RUNNING;
    }
    else
    {
        LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
        target->state = TARGET_UNKNOWN;
    }

    return retval;
}
813
814 static int cortex_a8_halt(struct target *target)
815 {
816 int retval = ERROR_OK;
817 uint32_t dscr;
818 struct armv7a_common *armv7a = target_to_armv7a(target);
819 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
820
821 /*
822 * Tell the core to be halted by writing DRCR with 0x1
823 * and then wait for the core to be halted.
824 */
825 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
826 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
827 if (retval != ERROR_OK)
828 return retval;
829
830 /*
831 * enter halting debug mode
832 */
833 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
834 armv7a->debug_base + CPUDBG_DSCR, &dscr);
835 if (retval != ERROR_OK)
836 return retval;
837
838 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
839 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
840 if (retval != ERROR_OK)
841 return retval;
842
843 long long then = timeval_ms();
844 for (;;)
845 {
846 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
847 armv7a->debug_base + CPUDBG_DSCR, &dscr);
848 if (retval != ERROR_OK)
849 return retval;
850 if ((dscr & DSCR_CORE_HALTED) != 0)
851 {
852 break;
853 }
854 if (timeval_ms() > then + 1000)
855 {
856 LOG_ERROR("Timeout waiting for halt");
857 return ERROR_FAIL;
858 }
859 }
860
861 target->debug_reason = DBG_REASON_DBGRQ;
862
863 return ERROR_OK;
864 }
865
/*
 * Prepare the core to leave debug state: decide on the resume PC and
 * write the (possibly modified) register context back to the core.
 *
 * current = 1: continue at the current pc, otherwise continue at
 * *address; in both cases *address is updated with the pc actually
 * used.  Does NOT restart the core -- see cortex_a8_internal_restart().
 */
static int cortex_a8_internal_restore(struct target *target, int current,
        uint32_t *address, int handle_breakpoints, int debug_execution)
{
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm *armv4_5 = &armv7a->armv4_5_common;
    int retval;
    uint32_t resume_pc;

    if (!debug_execution)
        target_free_all_working_areas(target);

#if 0
    if (debug_execution)
    {
        /* Disable interrupts */
        /* We disable interrupts in the PRIMASK register instead of
         * masking with C_MASKINTS,
         * This is probably the same issue as Cortex-M3 Errata 377493:
         * C_MASKINTS in parallel with disabled interrupts can cause
         * local faults to not be taken. */
        buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
        armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
        armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

        /* Make sure we are in Thumb mode */
        buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
            buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
        armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
        armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
    }
#endif

    /* current = 1: continue on current pc, otherwise continue at <address> */
    resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
    if (!current)
        resume_pc = *address;
    else
        *address = resume_pc;

    /* Make sure that the Armv7 gdb thumb fixups does not
     * kill the return address
     */
    switch (armv4_5->core_state)
    {
        case ARM_STATE_ARM:
            /* ARM instructions are word aligned. */
            resume_pc &= 0xFFFFFFFC;
            break;
        case ARM_STATE_THUMB:
        case ARM_STATE_THUMB_EE:
            /* When the return address is loaded into PC
             * bit 0 must be 1 to stay in Thumb state
             */
            resume_pc |= 0x1;
            break;
        case ARM_STATE_JAZELLE:
            LOG_ERROR("How do I resume into Jazelle state??");
            return ERROR_FAIL;
    }
    LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
    buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
    armv4_5->pc->dirty = 1;
    armv4_5->pc->valid = 1;

    /* Flush all dirty registers back to the core before it runs again. */
    retval = cortex_a8_restore_context(target, handle_breakpoints);
    if (retval != ERROR_OK)
        return retval;
    target->debug_reason = DBG_REASON_NOTHALTED;
    target->state = TARGET_RUNNING;

    /* registers are now invalid */
    register_cache_invalidate(armv4_5->core_cache);

#if 0
    /* the front-end may request us not to handle breakpoints */
    if (handle_breakpoints)
    {
        /* Single step past breakpoint at current address */
        if ((breakpoint = breakpoint_find(target, resume_pc)))
        {
            LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
            cortex_m3_unset_breakpoint(target, breakpoint);
            cortex_m3_single_step_core(target);
            cortex_m3_set_breakpoint(target, breakpoint);
        }
    }

#endif
    return retval;
}
955
956 static int cortex_a8_internal_restart(struct target *target)
957 {
958 struct armv7a_common *armv7a = target_to_armv7a(target);
959 struct arm *armv4_5 = &armv7a->armv4_5_common;
960 struct adiv5_dap *swjdp = armv4_5->dap;
961 int retval;
962 uint32_t dscr;
963 /*
964 * Restart core and wait for it to be started. Clear ITRen and sticky
965 * exception flags: see ARMv7 ARM, C5.9.
966 *
967 * REVISIT: for single stepping, we probably want to
968 * disable IRQs by default, with optional override...
969 */
970
971 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
972 armv7a->debug_base + CPUDBG_DSCR, &dscr);
973 if (retval != ERROR_OK)
974 return retval;
975
976 if ((dscr & DSCR_INSTR_COMP) == 0)
977 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
978
979 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
980 armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
981 if (retval != ERROR_OK)
982 return retval;
983
984 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
985 armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
986 DRCR_CLEAR_EXCEPTIONS);
987 if (retval != ERROR_OK)
988 return retval;
989
990 long long then = timeval_ms();
991 for (;;)
992 {
993 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
994 armv7a->debug_base + CPUDBG_DSCR, &dscr);
995 if (retval != ERROR_OK)
996 return retval;
997 if ((dscr & DSCR_CORE_RESTARTED) != 0)
998 break;
999 if (timeval_ms() > then + 1000)
1000 {
1001 LOG_ERROR("Timeout waiting for resume");
1002 return ERROR_FAIL;
1003 }
1004 }
1005
1006 target->debug_reason = DBG_REASON_NOTHALTED;
1007 target->state = TARGET_RUNNING;
1008
1009 /* registers are now invalid */
1010 register_cache_invalidate(armv4_5->core_cache);
1011
1012 return ERROR_OK;
1013 }
1014
1015 static int cortex_a8_restore_smp(struct target *target,int handle_breakpoints)
1016 {
1017 int retval = 0;
1018 struct target_list *head;
1019 struct target *curr;
1020 uint32_t address;
1021 head = target->head;
1022 while(head != (struct target_list*)NULL)
1023 {
1024 curr = head->target;
1025 if ((curr != target) && (curr->state != TARGET_RUNNING))
1026 {
1027 /* resume current address , not in step mode */
1028 retval += cortex_a8_internal_restore(curr, 1, &address,
1029 handle_breakpoints, 0);
1030 retval += cortex_a8_internal_restart(curr);
1031 }
1032 head = head->next;
1033
1034 }
1035 return retval;
1036 }
1037
1038 static int cortex_a8_resume(struct target *target, int current,
1039 uint32_t address, int handle_breakpoints, int debug_execution)
1040 {
1041 int retval = 0;
1042 /* dummy resume for smp toggle in order to reduce gdb impact */
1043 if ((target->smp) && (target->gdb_service->core[1]!=-1))
1044 {
1045 /* simulate a start and halt of target */
1046 target->gdb_service->target = NULL;
1047 target->gdb_service->core[0] = target->gdb_service->core[1];
1048 /* fake resume at next poll we play the target core[1], see poll*/
1049 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1050 return 0;
1051 }
1052 cortex_a8_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1053 if (target->smp)
1054 { target->gdb_service->core[0] = -1;
1055 retval += cortex_a8_restore_smp(target, handle_breakpoints);
1056 }
1057 cortex_a8_internal_restart(target);
1058
1059 if (!debug_execution)
1060 {
1061 target->state = TARGET_RUNNING;
1062 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1063 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1064 }
1065 else
1066 {
1067 target->state = TARGET_DEBUG_RUNNING;
1068 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1069 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1070 }
1071
1072 return ERROR_OK;
1073 }
1074
/* Called when the core enters debug state: records why it halted,
 * enables ITR instruction injection, and reloads the cached view of
 * the core registers (either via the DPM one-by-one, or in bulk
 * through a working area when fast_reg_read is set).
 * Finishes by invoking the armv7a post_debug_entry hook, if any. */
static int cortex_a8_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !!
	 * (cpudbg_dscr was already captured by poll) */
	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason: decodes the MOE bits of the DSCR value
	 * captured at halt time into target->debug_reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a8->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
	{
		/* slow path: the DPM fetches each register individually */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	}
	else
	{
		/* fast path: have the core dump r0-r15 into target RAM,
		 * then read the block back through the memory AP */
		retval = cortex_a8_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
		{
			return retval;
		}

		/* read Current PSR */
		retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		/* select the register bank matching the halted mode */
		arm_set_cpsr(armv4_5, cpsr);

		/* update cache: mark r0..PC valid and clean */
		for (i = 0; i <= ARM_PC; i++)
		{
			reg = arm_reg_current(armv4_5, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: the PC read out of the core is
		 * ahead of the halted instruction by the pipeline offset */
		if (cpsr & (1 << 5))
		{
			// T bit set for Thumb or ThumbEE state
			regfile[ARM_PC] -= 4;
		}
		else
		{
			// ARM state
			regfile[ARM_PC] -= 8;
		}

		/* store the corrected PC; marking it dirty forces it to be
		 * written back to the core on resume */
		reg = armv4_5->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
//	armv4_5->exception_number = 0;
	if (armv7a->post_debug_entry)
	{
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1204
1205 static int cortex_a8_post_debug_entry(struct target *target)
1206 {
1207 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1208 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1209 int retval;
1210
1211 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1212 retval = armv7a->armv4_5_common.mrc(target, 15,
1213 0, 0, /* op1, op2 */
1214 1, 0, /* CRn, CRm */
1215 &cortex_a8->cp15_control_reg);
1216 if (retval != ERROR_OK)
1217 return retval;
1218 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
1219
1220 if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
1221 {
1222 uint32_t cache_type_reg;
1223
1224 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
1225 retval = armv7a->armv4_5_common.mrc(target, 15,
1226 0, 1, /* op1, op2 */
1227 0, 0, /* CRn, CRm */
1228 &cache_type_reg);
1229 if (retval != ERROR_OK)
1230 return retval;
1231 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);
1232
1233 /* FIXME the armv4_4 cache info DOES NOT APPLY to Cortex-A8 */
1234 armv4_5_identify_cache(cache_type_reg,
1235 &armv7a->armv4_5_mmu.armv4_5_cache);
1236 }
1237
1238 armv7a->armv4_5_mmu.mmu_enabled =
1239 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
1240 armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
1241 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
1242 armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
1243 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
1244
1245 return ERROR_OK;
1246 }
1247
1248 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1249 int handle_breakpoints)
1250 {
1251 struct armv7a_common *armv7a = target_to_armv7a(target);
1252 struct arm *armv4_5 = &armv7a->armv4_5_common;
1253 struct breakpoint *breakpoint = NULL;
1254 struct breakpoint stepbreakpoint;
1255 struct reg *r;
1256 int retval;
1257
1258 if (target->state != TARGET_HALTED)
1259 {
1260 LOG_WARNING("target not halted");
1261 return ERROR_TARGET_NOT_HALTED;
1262 }
1263
1264 /* current = 1: continue on current pc, otherwise continue at <address> */
1265 r = armv4_5->pc;
1266 if (!current)
1267 {
1268 buf_set_u32(r->value, 0, 32, address);
1269 }
1270 else
1271 {
1272 address = buf_get_u32(r->value, 0, 32);
1273 }
1274
1275 /* The front-end may request us not to handle breakpoints.
1276 * But since Cortex-A8 uses breakpoint for single step,
1277 * we MUST handle breakpoints.
1278 */
1279 handle_breakpoints = 1;
1280 if (handle_breakpoints) {
1281 breakpoint = breakpoint_find(target, address);
1282 if (breakpoint)
1283 cortex_a8_unset_breakpoint(target, breakpoint);
1284 }
1285
1286 /* Setup single step breakpoint */
1287 stepbreakpoint.address = address;
1288 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1289 ? 2 : 4;
1290 stepbreakpoint.type = BKPT_HARD;
1291 stepbreakpoint.set = 0;
1292
1293 /* Break on IVA mismatch */
1294 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1295
1296 target->debug_reason = DBG_REASON_SINGLESTEP;
1297
1298 retval = cortex_a8_resume(target, 1, address, 0, 0);
1299 if (retval != ERROR_OK)
1300 return retval;
1301
1302 long long then = timeval_ms();
1303 while (target->state != TARGET_HALTED)
1304 {
1305 retval = cortex_a8_poll(target);
1306 if (retval != ERROR_OK)
1307 return retval;
1308 if (timeval_ms() > then + 1000)
1309 {
1310 LOG_ERROR("timeout waiting for target halt");
1311 return ERROR_FAIL;
1312 }
1313 }
1314
1315 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1316
1317 target->debug_reason = DBG_REASON_BREAKPOINT;
1318
1319 if (breakpoint)
1320 cortex_a8_set_breakpoint(target, breakpoint, 0);
1321
1322 if (target->state != TARGET_HALTED)
1323 LOG_DEBUG("target stepped");
1324
1325 return ERROR_OK;
1326 }
1327
1328 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1329 {
1330 struct armv7a_common *armv7a = target_to_armv7a(target);
1331
1332 LOG_DEBUG(" ");
1333
1334 if (armv7a->pre_restore_context)
1335 armv7a->pre_restore_context(target);
1336
1337 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1338 }
1339
1340
1341 /*
1342 * Cortex-A8 Breakpoint and watchpoint functions
1343 */
1344
1345 /* Setup hardware Breakpoint Register Pair */
1346 static int cortex_a8_set_breakpoint(struct target *target,
1347 struct breakpoint *breakpoint, uint8_t matchmode)
1348 {
1349 int retval;
1350 int brp_i=0;
1351 uint32_t control;
1352 uint8_t byte_addr_select = 0x0F;
1353 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1354 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1355 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1356
1357 if (breakpoint->set)
1358 {
1359 LOG_WARNING("breakpoint already set");
1360 return ERROR_OK;
1361 }
1362
1363 if (breakpoint->type == BKPT_HARD)
1364 {
1365 while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
1366 brp_i++ ;
1367 if (brp_i >= cortex_a8->brp_num)
1368 {
1369 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1370 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1371 }
1372 breakpoint->set = brp_i + 1;
1373 if (breakpoint->length == 2)
1374 {
1375 byte_addr_select = (3 << (breakpoint->address & 0x02));
1376 }
1377 control = ((matchmode & 0x7) << 20)
1378 | (byte_addr_select << 5)
1379 | (3 << 1) | 1;
1380 brp_list[brp_i].used = 1;
1381 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1382 brp_list[brp_i].control = control;
1383 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1384 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1385 brp_list[brp_i].value);
1386 if (retval != ERROR_OK)
1387 return retval;
1388 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1389 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1390 brp_list[brp_i].control);
1391 if (retval != ERROR_OK)
1392 return retval;
1393 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1394 brp_list[brp_i].control,
1395 brp_list[brp_i].value);
1396 }
1397 else if (breakpoint->type == BKPT_SOFT)
1398 {
1399 uint8_t code[4];
1400 if (breakpoint->length == 2)
1401 {
1402 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1403 }
1404 else
1405 {
1406 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1407 }
1408 retval = target->type->read_memory(target,
1409 breakpoint->address & 0xFFFFFFFE,
1410 breakpoint->length, 1,
1411 breakpoint->orig_instr);
1412 if (retval != ERROR_OK)
1413 return retval;
1414 retval = target->type->write_memory(target,
1415 breakpoint->address & 0xFFFFFFFE,
1416 breakpoint->length, 1, code);
1417 if (retval != ERROR_OK)
1418 return retval;
1419 breakpoint->set = 0x11; /* Any nice value but 0 */
1420 }
1421
1422 return ERROR_OK;
1423 }
1424
1425 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1426 {
1427 int retval;
1428 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1429 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1430 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1431
1432 if (!breakpoint->set)
1433 {
1434 LOG_WARNING("breakpoint not set");
1435 return ERROR_OK;
1436 }
1437
1438 if (breakpoint->type == BKPT_HARD)
1439 {
1440 int brp_i = breakpoint->set - 1;
1441 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1442 {
1443 LOG_DEBUG("Invalid BRP number in breakpoint");
1444 return ERROR_OK;
1445 }
1446 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1447 brp_list[brp_i].control, brp_list[brp_i].value);
1448 brp_list[brp_i].used = 0;
1449 brp_list[brp_i].value = 0;
1450 brp_list[brp_i].control = 0;
1451 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1452 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1453 brp_list[brp_i].control);
1454 if (retval != ERROR_OK)
1455 return retval;
1456 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1457 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1458 brp_list[brp_i].value);
1459 if (retval != ERROR_OK)
1460 return retval;
1461 }
1462 else
1463 {
1464 /* restore original instruction (kept in target endianness) */
1465 if (breakpoint->length == 4)
1466 {
1467 retval = target->type->write_memory(target,
1468 breakpoint->address & 0xFFFFFFFE,
1469 4, 1, breakpoint->orig_instr);
1470 if (retval != ERROR_OK)
1471 return retval;
1472 }
1473 else
1474 {
1475 retval = target->type->write_memory(target,
1476 breakpoint->address & 0xFFFFFFFE,
1477 2, 1, breakpoint->orig_instr);
1478 if (retval != ERROR_OK)
1479 return retval;
1480 }
1481 }
1482 breakpoint->set = 0;
1483
1484 return ERROR_OK;
1485 }
1486
1487 static int cortex_a8_add_breakpoint(struct target *target,
1488 struct breakpoint *breakpoint)
1489 {
1490 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1491
1492 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1493 {
1494 LOG_INFO("no hardware breakpoint available");
1495 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1496 }
1497
1498 if (breakpoint->type == BKPT_HARD)
1499 cortex_a8->brp_num_available--;
1500
1501 return cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1502 }
1503
1504 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1505 {
1506 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1507
1508 #if 0
1509 /* It is perfectly possible to remove breakpoints while the target is running */
1510 if (target->state != TARGET_HALTED)
1511 {
1512 LOG_WARNING("target not halted");
1513 return ERROR_TARGET_NOT_HALTED;
1514 }
1515 #endif
1516
1517 if (breakpoint->set)
1518 {
1519 cortex_a8_unset_breakpoint(target, breakpoint);
1520 if (breakpoint->type == BKPT_HARD)
1521 cortex_a8->brp_num_available++ ;
1522 }
1523
1524
1525 return ERROR_OK;
1526 }
1527
1528
1529
1530 /*
1531 * Cortex-A8 Reset functions
1532 */
1533
1534 static int cortex_a8_assert_reset(struct target *target)
1535 {
1536 struct armv7a_common *armv7a = target_to_armv7a(target);
1537
1538 LOG_DEBUG(" ");
1539
1540 /* FIXME when halt is requested, make it work somehow... */
1541
1542 /* Issue some kind of warm reset. */
1543 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1544 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1545 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1546 /* REVISIT handle "pulls" cases, if there's
1547 * hardware that needs them to work.
1548 */
1549 jtag_add_reset(0, 1);
1550 } else {
1551 LOG_ERROR("%s: how to reset?", target_name(target));
1552 return ERROR_FAIL;
1553 }
1554
1555 /* registers are now invalid */
1556 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1557
1558 target->state = TARGET_RESET;
1559
1560 return ERROR_OK;
1561 }
1562
1563 static int cortex_a8_deassert_reset(struct target *target)
1564 {
1565 int retval;
1566
1567 LOG_DEBUG(" ");
1568
1569 /* be certain SRST is off */
1570 jtag_add_reset(0, 0);
1571
1572 retval = cortex_a8_poll(target);
1573 if (retval != ERROR_OK)
1574 return retval;
1575
1576 if (target->reset_halt) {
1577 if (target->state != TARGET_HALTED) {
1578 LOG_WARNING("%s: ran after reset and before halt ...",
1579 target_name(target));
1580 if ((retval = target_halt(target)) != ERROR_OK)
1581 return retval;
1582 }
1583 }
1584
1585 return ERROR_OK;
1586 }
1587
1588
/* Write target memory through the APB-AP by injecting load/store
 * instructions into the halted core: r0 holds the (word-aligned)
 * address, r1 shuttles one word at a time through the DTR.
 * Unaligned leading/trailing bytes are handled with a read-modify-
 * write of the containing word. r0/r1 are marked dirty so their
 * cached values are restored on resume.
 * NOTE(review): 'count * size' is narrowed into an int — assumes the
 * product fits; confirm against callers. */
static int cortex_a8_write_apb_ab_memory(struct target *target,
		uint32_t address, uint32_t size,
		uint32_t count, const uint8_t *buffer)
{

	/* write memory through APB-AP */

	int retval = ERROR_INVALID_ARGUMENTS;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	int total_bytes = count * size;
	int start_byte, nbytes_to_write, i;
	struct reg *reg;
	/* union lets the word read via r1 be patched one byte at a time */
	union _data {
		uint8_t uc_a[4];
		uint32_t ui;
	} data;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* r0 and r1 are clobbered below; flag them for restore */
	reg = arm_reg_current(armv4_5, 0);
	reg->dirty = 1;
	reg = arm_reg_current(armv4_5, 1);
	reg->dirty = 1;

	/* r0 = word-aligned base address */
	retval = cortex_a8_dap_write_coreregister_u32(target, address & 0xFFFFFFFC, 0);
	if (retval != ERROR_OK)
		return retval;

	/* byte offset of the first write inside its word */
	start_byte = address & 0x3;

	while (total_bytes > 0) {

		nbytes_to_write = 4 - start_byte;
		if (total_bytes < nbytes_to_write)
			nbytes_to_write = total_bytes;

		if ( nbytes_to_write != 4 ) {

			/* partial word: fetch the current contents first
			 * so the untouched bytes are preserved */
			/* execute instruction LDR r1, [r0] */
			retval = cortex_a8_exec_opcode(target,  ARMV4_5_LDR(1, 0), NULL);
			if (retval != ERROR_OK)
				return retval;

			retval = cortex_a8_dap_read_coreregister_u32(target, &data.ui, 1);
			if (retval != ERROR_OK)
				return retval;
		}

		/* patch in the caller's bytes at the right lanes */
		for (i = 0; i < nbytes_to_write; ++i)
			data.uc_a[i + start_byte] = *buffer++;

		retval = cortex_a8_dap_write_coreregister_u32(target, data.ui, 1);
		if (retval != ERROR_OK)
			return retval;

		/* execute instruction STRW r1, [r0], 1 (0xe4801004) */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_STRW_IP(1, 0) , NULL);
		if (retval != ERROR_OK)
			return retval;

		total_bytes -= nbytes_to_write;
		/* only the first word can start mid-word */
		start_byte = 0;
	}

	return retval;
}
1660
1661
/* Read target memory through the APB-AP by injecting load
 * instructions into the halted core: r0 holds the (word-aligned)
 * address and post-increments, r1 receives each word which is then
 * read back over the DTR. Unaligned edges are trimmed byte-wise.
 * r0/r1 are marked dirty so their cached values are restored on
 * resume. Mirrors cortex_a8_write_apb_ab_memory(). */
static int cortex_a8_read_apb_ab_memory(struct target *target,
		uint32_t address, uint32_t size,
		uint32_t count, uint8_t *buffer)
{

	/* read memory through APB-AP */

	int retval = ERROR_INVALID_ARGUMENTS;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	int total_bytes = count * size;
	int start_byte, nbytes_to_read, i;
	struct reg *reg;
	/* union exposes the fetched word as four addressable bytes */
	union _data {
		uint8_t uc_a[4];
		uint32_t ui;
	} data;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* r0 and r1 are clobbered below; flag them for restore */
	reg = arm_reg_current(armv4_5, 0);
	reg->dirty = 1;
	reg = arm_reg_current(armv4_5, 1);
	reg->dirty = 1;

	/* r0 = word-aligned base address */
	retval = cortex_a8_dap_write_coreregister_u32(target, address & 0xFFFFFFFC, 0);
	if (retval != ERROR_OK)
		return retval;

	/* byte offset of the first read inside its word */
	start_byte = address & 0x3;

	while (total_bytes > 0) {

		/* execute instruction LDRW r1, [r0], 4 (0xe4901004)  */
		retval = cortex_a8_exec_opcode(target,  ARMV4_5_LDRW_IP(1, 0), NULL);
		if (retval != ERROR_OK)
			return retval;

		retval = cortex_a8_dap_read_coreregister_u32(target, &data.ui, 1);
		if (retval != ERROR_OK)
			return retval;

		nbytes_to_read = 4 - start_byte;
		if (total_bytes < nbytes_to_read)
			nbytes_to_read = total_bytes;

		/* copy out only the lanes the caller asked for */
		for (i = 0; i < nbytes_to_read; ++i)
			*buffer++ = data.uc_a[i + start_byte];

		total_bytes -= nbytes_to_read;
		/* only the first word can start mid-word */
		start_byte = 0;
	}

	return retval;
}
1721
1722
1723
1724 /*
1725 * Cortex-A8 Memory access
1726 *
1727 * This is same Cortex M3 but we must also use the correct
1728 * ap number for every access.
1729 */
1730
1731 static int cortex_a8_read_phys_memory(struct target *target,
1732 uint32_t address, uint32_t size,
1733 uint32_t count, uint8_t *buffer)
1734 {
1735 struct armv7a_common *armv7a = target_to_armv7a(target);
1736 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1737 int retval = ERROR_INVALID_ARGUMENTS;
1738 uint8_t apsel = swjdp->apsel;
1739 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d",
1740 address, size, count);
1741
1742 if (count && buffer) {
1743
1744 if ( apsel == swjdp_memoryap ) {
1745
1746 /* read memory through AHB-AP */
1747
1748 switch (size) {
1749 case 4:
1750 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
1751 buffer, 4 * count, address);
1752 break;
1753 case 2:
1754 retval = mem_ap_sel_read_buf_u16(swjdp, swjdp_memoryap,
1755 buffer, 2 * count, address);
1756 break;
1757 case 1:
1758 retval = mem_ap_sel_read_buf_u8(swjdp, swjdp_memoryap,
1759 buffer, count, address);
1760 break;
1761 }
1762 } else {
1763
1764 /* read memory through APB-AP */
1765 int enabled = 0;
1766
1767 retval = cortex_a8_mmu(target, &enabled);
1768 if (retval != ERROR_OK)
1769 return retval;
1770
1771 if (enabled)
1772 {
1773 LOG_WARNING("Reading physical memory through \
1774 APB with MMU enabled is not yet implemented");
1775 return ERROR_TARGET_FAILURE;
1776 }
1777 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
1778 }
1779 }
1780 return retval;
1781 }
1782
1783 static int cortex_a8_read_memory(struct target *target, uint32_t address,
1784 uint32_t size, uint32_t count, uint8_t *buffer)
1785 {
1786 int enabled = 0;
1787 uint32_t virt, phys;
1788 int retval;
1789 struct armv7a_common *armv7a = target_to_armv7a(target);
1790 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1791 uint8_t apsel = swjdp->apsel;
1792
1793 /* cortex_a8 handles unaligned memory access */
1794 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
1795 size, count);
1796 if (apsel == swjdp_memoryap) {
1797 retval = cortex_a8_mmu(target, &enabled);
1798 if (retval != ERROR_OK)
1799 return retval;
1800
1801
1802 if(enabled)
1803 {
1804 virt = address;
1805 retval = cortex_a8_virt2phys(target, virt, &phys);
1806 if (retval != ERROR_OK)
1807 return retval;
1808
1809 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x",
1810 virt, phys);
1811 address = phys;
1812 }
1813 retval = cortex_a8_read_phys_memory(target, address, size, count, buffer);
1814 } else {
1815 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
1816 }
1817 return retval;
1818 }
1819
/* Write physical memory: via the AHB-AP (memory AP) when that AP is
 * selected, otherwise through the core over the APB-AP. After a
 * successful AHB write while halted, the affected I-/D-cache lines
 * are invalidated via the DPM so the core observes the new data.
 * NOTE(review): the APB path returns directly and therefore skips
 * the cache maintenance below — confirm that is intended. */
static int cortex_a8_write_phys_memory(struct target *target,
		uint32_t address, uint32_t size,
		uint32_t count, const uint8_t *buffer)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
	int retval = ERROR_INVALID_ARGUMENTS;
	uint8_t apsel = swjdp->apsel;

	LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address,
			size, count);

	if (count && buffer) {

		if ( apsel == swjdp_memoryap ) {

			/* write memory through AHB-AP */

			switch (size) {
				case 4:
					retval = mem_ap_sel_write_buf_u32(swjdp, swjdp_memoryap,
							buffer, 4 * count, address);
					break;
				case 2:
					retval = mem_ap_sel_write_buf_u16(swjdp, swjdp_memoryap,
							buffer, 2 * count, address);
					break;
				case 1:
					retval = mem_ap_sel_write_buf_u8(swjdp, swjdp_memoryap,
							buffer, count, address);
					break;
			}

		} else {

			/* write memory through APB-AP */
			int enabled = 0;

			retval = cortex_a8_mmu(target, &enabled);
			if (retval != ERROR_OK)
				return retval;

			if (enabled)
			{
				/* NOTE(review): string concatenation below yields
				 * "...MMUenabled..." — missing space between the
				 * two literals */
				LOG_WARNING("Writing physical memory through APB with MMU" \
						"enabled is not yet implemented");
				return ERROR_TARGET_FAILURE;
			}
			return cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
		}
	}


	/* REVISIT this op is generic ARMv7-A/R stuff */
	if (retval == ERROR_OK && target->state == TARGET_HALTED)
	{
		struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;

		retval = dpm->prepare(dpm);
		if (retval != ERROR_OK)
			return retval;

		/* The Cache handling will NOT work with MMU active, the
		 * wrong addresses will be invalidated!
		 *
		 * For both ICache and DCache, walk all cache lines in the
		 * address range. Cortex-A8 has fixed 64 byte line length.
		 *
		 * REVISIT per ARMv7, these may trigger watchpoints ...
		 */

		/* invalidate I-Cache */
		if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
		{
			/* ICIMVAU - Invalidate Cache single entry
			 * with MVA to PoU
			 *	MCR p15, 0, r0, c7, c5, 1
			 */
			for (uint32_t cacheline = address;
					cacheline < address + size * count;
					cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
						cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* invalidate D-Cache */
		if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
		{
			/* DCIMVAC - Invalidate data Cache line
			 * with MVA to PoC
			 *	MCR p15, 0, r0, c7, c6, 1
			 */
			for (uint32_t cacheline = address;
					cacheline < address + size * count;
					cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
						cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* (void) */ dpm->finish(dpm);
	}

	return retval;
}
1932
1933 static int cortex_a8_write_memory(struct target *target, uint32_t address,
1934 uint32_t size, uint32_t count, const uint8_t *buffer)
1935 {
1936 int enabled = 0;
1937 uint32_t virt, phys;
1938 int retval;
1939 struct armv7a_common *armv7a = target_to_armv7a(target);
1940 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1941 uint8_t apsel = swjdp->apsel;
1942 /* cortex_a8 handles unaligned memory access */
1943 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
1944 size, count);
1945 if (apsel == swjdp_memoryap) {
1946
1947 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1948 retval = cortex_a8_mmu(target, &enabled);
1949 if (retval != ERROR_OK)
1950 return retval;
1951
1952 if(enabled)
1953 {
1954 virt = address;
1955 retval = cortex_a8_virt2phys(target, virt, &phys);
1956 if (retval != ERROR_OK)
1957 return retval;
1958 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1959 address = phys;
1960 }
1961
1962 retval = cortex_a8_write_phys_memory(target, address, size,
1963 count, buffer);
1964 }
1965 else {
1966 retval = cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
1967 }
1968 return retval;
1969 }
1970
/* Bulk write is simply a word-sized write; the underlying write path
 * already handles translation and cache maintenance. */
static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, const uint8_t *buffer)
{
	return cortex_a8_write_memory(target, address, 4, count, buffer);
}
1976
1977
1978 static int cortex_a8_handle_target_request(void *priv)
1979 {
1980 struct target *target = priv;
1981 struct armv7a_common *armv7a = target_to_armv7a(target);
1982 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1983 int retval;
1984
1985 if (!target_was_examined(target))
1986 return ERROR_OK;
1987 if (!target->dbg_msg_enabled)
1988 return ERROR_OK;
1989
1990 if (target->state == TARGET_RUNNING)
1991 {
1992 uint32_t request;
1993 uint32_t dscr;
1994 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1995 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1996
1997 /* check if we have data */
1998 while ((dscr & DSCR_DTR_TX_FULL) && (retval==ERROR_OK))
1999 {
2000 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2001 armv7a->debug_base+ CPUDBG_DTRTX, &request);
2002 if (retval == ERROR_OK)
2003 {
2004 target_request(target, request);
2005 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2006 armv7a->debug_base+ CPUDBG_DSCR, &dscr);
2007 }
2008 }
2009 }
2010
2011 return ERROR_OK;
2012 }
2013
2014 /*
2015 * Cortex-A8 target information and configuration
2016 */
2017
2018 static int cortex_a8_examine_first(struct target *target)
2019 {
2020 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2021 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2022 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
2023 int i;
2024 int retval = ERROR_OK;
2025 uint32_t didr, ctypr, ttypr, cpuid;
2026
2027 /* We do one extra read to ensure DAP is configured,
2028 * we call ahbap_debugport_init(swjdp) instead
2029 */
2030 retval = ahbap_debugport_init(swjdp);
2031 if (retval != ERROR_OK)
2032 return retval;
2033
2034 if (!target->dbgbase_set)
2035 {
2036 uint32_t dbgbase;
2037 /* Get ROM Table base */
2038 uint32_t apid;
2039 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
2040 if (retval != ERROR_OK)
2041 return retval;
2042 /* Lookup 0x15 -- Processor DAP */
2043 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
2044 &armv7a->debug_base);
2045 if (retval != ERROR_OK)
2046 return retval;
2047 }
2048 else
2049 {
2050 armv7a->debug_base = target->dbgbase;
2051 }
2052
2053 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2054 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2055 if (retval != ERROR_OK)
2056 return retval;
2057
2058 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2059 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
2060 {
2061 LOG_DEBUG("Examine %s failed", "CPUID");
2062 return retval;
2063 }
2064
2065 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2066 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
2067 {
2068 LOG_DEBUG("Examine %s failed", "CTYPR");
2069 return retval;
2070 }
2071
2072 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2073 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
2074 {
2075 LOG_DEBUG("Examine %s failed", "TTYPR");
2076 return retval;
2077 }
2078
2079 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2080 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
2081 {
2082 LOG_DEBUG("Examine %s failed", "DIDR");
2083 return retval;
2084 }
2085
2086 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2087 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2088 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2089 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2090
2091 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
2092 retval = cortex_a8_dpm_setup(cortex_a8, didr);
2093 if (retval != ERROR_OK)
2094 return retval;
2095
2096 /* Setup Breakpoint Register Pairs */
2097 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
2098 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2099 cortex_a8->brp_num_available = cortex_a8->brp_num;
2100 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
2101 // cortex_a8->brb_enabled = ????;
2102 for (i = 0; i < cortex_a8->brp_num; i++)
2103 {
2104 cortex_a8->brp_list[i].used = 0;
2105 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
2106 cortex_a8->brp_list[i].type = BRP_NORMAL;
2107 else
2108 cortex_a8->brp_list[i].type = BRP_CONTEXT;
2109 cortex_a8->brp_list[i].value = 0;
2110 cortex_a8->brp_list[i].control = 0;
2111 cortex_a8->brp_list[i].BRPn = i;
2112 }
2113
2114 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
2115
2116 target_set_examined(target);
2117 return ERROR_OK;
2118 }
2119
2120 static int cortex_a8_examine(struct target *target)
2121 {
2122 int retval = ERROR_OK;
2123
2124 /* don't re-probe hardware after each reset */
2125 if (!target_was_examined(target))
2126 retval = cortex_a8_examine_first(target);
2127
2128 /* Configure core debug access */
2129 if (retval == ERROR_OK)
2130 retval = cortex_a8_init_debug_access(target);
2131
2132 return retval;
2133 }
2134
2135 /*
2136 * Cortex-A8 target creation and initialization
2137 */
2138
/* target_type init_target hook: intentionally empty — the real
 * probing and setup happen in cortex_a8_examine_first(). */
static int cortex_a8_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
2145
/* One-time construction of the cortex_a8/armv7a/arm state for a new
 * target. If the TAP does not yet own a DAP, the embedded one is
 * wired up (JTAG info, TAR autoincrement, memaccess timing) and
 * published on the TAP; otherwise the TAP's existing DAP is reused
 * (e.g. a second core sharing the same DAP). Ends by registering the
 * debug-message polling timer. */
static int cortex_a8_init_arch_info(struct target *target,
		struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
{
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct adiv5_dap *dap = &armv7a->dap;

	/* provisional; may be overwritten with tap->dap below */
	armv7a->armv4_5_common.dap = dap;

	/* Setup struct cortex_a8_common */
	cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
	/* tap has no dap initialized */
	if (!tap->dap)
	{
		armv7a->armv4_5_common.dap = dap;
		/* Setup struct cortex_a8_common */
		armv4_5->arch_info = armv7a;

		/* prepare JTAG information for the new target */
		cortex_a8->jtag_info.tap = tap;
		cortex_a8->jtag_info.scann_size = 4;

		/* Leave (only) generic DAP stuff for debugport_init() */
		dap->jtag_info = &cortex_a8->jtag_info;

		/* Number of bits for tar autoincrement, impl. dep. at least 10 */
		dap->tar_autoincr_block = (1 << 10);
		dap->memaccess_tck = 80;
		/* publish the DAP so other cores on this TAP can share it */
		tap->dap = dap;
	}
	else
		armv7a->armv4_5_common.dap = tap->dap;

	cortex_a8->fast_reg_read = 0;

	/* Set default value */
	cortex_a8->current_address_mode = ARM_MODE_ANY;

	/* register arch-specific functions */
	armv7a->examine_debug_reason = NULL;

	armv7a->post_debug_entry = cortex_a8_post_debug_entry;

	armv7a->pre_restore_context = NULL;
	/* ctype == -1 makes post_debug_entry identify the cache once */
	armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
	armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
	armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
	armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
	armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
	armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
	armv7a->armv4_5_mmu.has_tiny_pages = 1;
	armv7a->armv4_5_mmu.mmu_enabled = 0;


//	arm7_9->handle_target_request = cortex_a8_handle_target_request;

	/* REVISIT v7a setup should be in a v7a-specific routine */
	arm_init_arch_info(target, armv4_5);
	armv7a->common_magic = ARMV7_COMMON_MAGIC;

	/* poll for firmware debug messages every 1 ms */
	target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);

	return ERROR_OK;
}
2210
2211 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
2212 {
2213 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
2214
2215 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
2216 }
2217
2218 static int cortex_a8_get_ttb(struct target *target, uint32_t *result)
2219 {
2220 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2221 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2222 uint32_t ttb = 0, retval = ERROR_OK;
2223
2224 /* current_address_mode is set inside cortex_a8_virt2phys()
2225 where we can determine if address belongs to user or kernel */
2226 if(cortex_a8->current_address_mode == ARM_MODE_SVC)
2227 {
2228 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2229 retval = armv7a->armv4_5_common.mrc(target, 15,
2230 0, 1, /* op1, op2 */
2231 2, 0, /* CRn, CRm */
2232 &ttb);
2233 if (retval != ERROR_OK)
2234 return retval;
2235 }
2236 else if(cortex_a8->current_address_mode == ARM_MODE_USR)
2237 {
2238 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2239 retval = armv7a->armv4_5_common.mrc(target, 15,
2240 0, 0, /* op1, op2 */
2241 2, 0, /* CRn, CRm */
2242 &ttb);
2243 if (retval != ERROR_OK)
2244 return retval;
2245 }
2246 /* we don't know whose address is: user or kernel
2247 we assume that if we are in kernel mode then
2248 address belongs to kernel else if in user mode
2249 - to user */
2250 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
2251 {
2252 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2253 retval = armv7a->armv4_5_common.mrc(target, 15,
2254 0, 1, /* op1, op2 */
2255 2, 0, /* CRn, CRm */
2256 &ttb);
2257 if (retval != ERROR_OK)
2258 return retval;
2259 }
2260 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
2261 {
2262 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2263 retval = armv7a->armv4_5_common.mrc(target, 15,
2264 0, 0, /* op1, op2 */
2265 2, 0, /* CRn, CRm */
2266 &ttb);
2267 if (retval != ERROR_OK)
2268 return retval;
2269 }
2270 /* finally we don't know whose ttb to use: user or kernel */
2271 else
2272 LOG_ERROR("Don't know how to get ttb for current mode!!!");
2273
2274 ttb &= 0xffffc000;
2275
2276 *result = ttb;
2277
2278 return ERROR_OK;
2279 }
2280
2281 static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
2282 int d_u_cache, int i_cache)
2283 {
2284 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2285 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2286 uint32_t cp15_control;
2287 int retval;
2288
2289 /* read cp15 control register */
2290 retval = armv7a->armv4_5_common.mrc(target, 15,
2291 0, 0, /* op1, op2 */
2292 1, 0, /* CRn, CRm */
2293 &cp15_control);
2294 if (retval != ERROR_OK)
2295 return retval;
2296
2297
2298 if (mmu)
2299 cp15_control &= ~0x1U;
2300
2301 if (d_u_cache)
2302 cp15_control &= ~0x4U;
2303
2304 if (i_cache)
2305 cp15_control &= ~0x1000U;
2306
2307 retval = armv7a->armv4_5_common.mcr(target, 15,
2308 0, 0, /* op1, op2 */
2309 1, 0, /* CRn, CRm */
2310 cp15_control);
2311 return retval;
2312 }
2313
2314 static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
2315 int d_u_cache, int i_cache)
2316 {
2317 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2318 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2319 uint32_t cp15_control;
2320 int retval;
2321
2322 /* read cp15 control register */
2323 retval = armv7a->armv4_5_common.mrc(target, 15,
2324 0, 0, /* op1, op2 */
2325 1, 0, /* CRn, CRm */
2326 &cp15_control);
2327 if (retval != ERROR_OK)
2328 return retval;
2329
2330 if (mmu)
2331 cp15_control |= 0x1U;
2332
2333 if (d_u_cache)
2334 cp15_control |= 0x4U;
2335
2336 if (i_cache)
2337 cp15_control |= 0x1000U;
2338
2339 retval = armv7a->armv4_5_common.mcr(target, 15,
2340 0, 0, /* op1, op2 */
2341 1, 0, /* CRn, CRm */
2342 cp15_control);
2343 return retval;
2344 }
2345
2346
2347 static int cortex_a8_mmu(struct target *target, int *enabled)
2348 {
2349 if (target->state != TARGET_HALTED) {
2350 LOG_ERROR("%s: target not halted", __func__);
2351 return ERROR_TARGET_INVALID;
2352 }
2353
2354 *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
2355 return ERROR_OK;
2356 }
2357
2358 static int cortex_a8_virt2phys(struct target *target,
2359 uint32_t virt, uint32_t *phys)
2360 {
2361 uint32_t cb;
2362 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2363 // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2364 struct armv7a_common *armv7a = target_to_armv7a(target);
2365
2366 /* We assume that virtual address is separated
2367 between user and kernel in Linux style:
2368 0x00000000-0xbfffffff - User space
2369 0xc0000000-0xffffffff - Kernel space */
2370 if( virt < 0xc0000000 ) /* Linux user space */
2371 cortex_a8->current_address_mode = ARM_MODE_USR;
2372 else /* Linux kernel */
2373 cortex_a8->current_address_mode = ARM_MODE_SVC;
2374 uint32_t ret;
2375 int retval = armv4_5_mmu_translate_va(target,
2376 &armv7a->armv4_5_mmu, virt, &cb, &ret);
2377 if (retval != ERROR_OK)
2378 return retval;
2379 /* Reset the flag. We don't want someone else to use it by error */
2380 cortex_a8->current_address_mode = ARM_MODE_ANY;
2381
2382 *phys = ret;
2383 return ERROR_OK;
2384 }
2385
2386 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
2387 {
2388 struct target *target = get_current_target(CMD_CTX);
2389 struct armv7a_common *armv7a = target_to_armv7a(target);
2390
2391 return armv4_5_handle_cache_info_command(CMD_CTX,
2392 &armv7a->armv4_5_mmu.armv4_5_cache);
2393 }
2394
2395
2396 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
2397 {
2398 struct target *target = get_current_target(CMD_CTX);
2399 if (!target_was_examined(target))
2400 {
2401 LOG_ERROR("target not examined yet");
2402 return ERROR_FAIL;
2403 }
2404
2405 return cortex_a8_init_debug_access(target);
2406 }
2407 COMMAND_HANDLER(cortex_a8_handle_smp_off_command)
2408 {
2409 struct target *target = get_current_target(CMD_CTX);
2410 /* check target is an smp target */
2411 struct target_list *head;
2412 struct target *curr;
2413 head = target->head;
2414 target->smp = 0;
2415 if (head != (struct target_list*)NULL)
2416 {
2417 while (head != (struct target_list*)NULL)
2418 {
2419 curr = head->target;
2420 curr->smp = 0;
2421 head = head->next;
2422 }
2423 /* fixes the target display to the debugger */
2424 target->gdb_service->target = target;
2425 }
2426 return ERROR_OK;
2427 }
2428
2429 COMMAND_HANDLER(cortex_a8_handle_smp_on_command)
2430 {
2431 struct target *target = get_current_target(CMD_CTX);
2432 struct target_list *head;
2433 struct target *curr;
2434 head = target->head;
2435 if (head != (struct target_list*)NULL)
2436 { target->smp=1;
2437 while (head != (struct target_list*)NULL)
2438 {
2439 curr = head->target;
2440 curr->smp = 1;
2441 head = head->next;
2442 }
2443 }
2444 return ERROR_OK;
2445 }
2446
2447 COMMAND_HANDLER(cortex_a8_handle_smp_gdb_command)
2448 {
2449 struct target *target = get_current_target(CMD_CTX);
2450 int retval = ERROR_OK;
2451 struct target_list *head;
2452 head = target->head;
2453 if (head != (struct target_list*)NULL)
2454 {
2455 if (CMD_ARGC == 1)
2456 {
2457 int coreid = 0;
2458 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2459 if (ERROR_OK != retval)
2460 return retval;
2461 target->gdb_service->core[1]=coreid;
2462
2463 }
2464 command_print(CMD_CTX, "gdb coreid %d -> %d", target->gdb_service->core[0]
2465 , target->gdb_service->core[1]);
2466 }
2467 return ERROR_OK;
2468 }
2469
2470 static const struct command_registration cortex_a8_exec_command_handlers[] = {
2471 {
2472 .name = "cache_info",
2473 .handler = cortex_a8_handle_cache_info_command,
2474 .mode = COMMAND_EXEC,
2475 .help = "display information about target caches",
2476 },
2477 {
2478 .name = "dbginit",
2479 .handler = cortex_a8_handle_dbginit_command,
2480 .mode = COMMAND_EXEC,
2481 .help = "Initialize core debug",
2482 },
2483 { .name ="smp_off",
2484 .handler = cortex_a8_handle_smp_off_command,
2485 .mode = COMMAND_EXEC,
2486 .help = "Stop smp handling",
2487 },
2488 {
2489 .name ="smp_on",
2490 .handler = cortex_a8_handle_smp_on_command,
2491 .mode = COMMAND_EXEC,
2492 .help = "Restart smp handling",
2493 },
2494 {
2495 .name ="smp_gdb",
2496 .handler = cortex_a8_handle_smp_gdb_command,
2497 .mode = COMMAND_EXEC,
2498 .help = "display/fix current core played to gdb",
2499 },
2500
2501
2502 COMMAND_REGISTRATION_DONE
2503 };
2504 static const struct command_registration cortex_a8_command_handlers[] = {
2505 {
2506 .chain = arm_command_handlers,
2507 },
2508 {
2509 .chain = armv7a_command_handlers,
2510 },
2511 {
2512 .name = "cortex_a8",
2513 .mode = COMMAND_ANY,
2514 .help = "Cortex-A8 command group",
2515 .chain = cortex_a8_exec_command_handlers,
2516 },
2517 COMMAND_REGISTRATION_DONE
2518 };
2519
/* Target-type vtable exported for Cortex-A8 cores */
struct target_type cortexa8_target = {
	.name = "cortex_a8",

	/* state polling and reporting */
	.poll = cortex_a8_poll,
	.arch_state = armv7a_arch_state,

	.target_request_data = NULL,

	/* run control */
	.halt = cortex_a8_halt,
	.resume = cortex_a8_resume,
	.step = cortex_a8_step,

	.assert_reset = cortex_a8_assert_reset,
	.deassert_reset = cortex_a8_deassert_reset,
	.soft_reset_halt = NULL,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* virtual-address memory access */
	.read_memory = cortex_a8_read_memory,
	.write_memory = cortex_a8_write_memory,
	.bulk_write_memory = cortex_a8_bulk_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* hardware breakpoints only; watchpoints not implemented */
	.add_breakpoint = cortex_a8_add_breakpoint,
	.remove_breakpoint = cortex_a8_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a8_command_handlers,
	.target_create = cortex_a8_target_create,
	.init_target = cortex_a8_init_target,
	.examine = cortex_a8_examine,

	/* physical-address access and address translation */
	.read_phys_memory = cortex_a8_read_phys_memory,
	.write_phys_memory = cortex_a8_write_phys_memory,
	.mmu = cortex_a8_mmu,
	.virt2phys = cortex_a8_virt2phys,

};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account. Then change the URL to https://review.openocd.org/login/?link and revisit this page; this time it will link the new method to your account. Thank you.

SSH host key fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)