arm mmu: error propagation added for address translation
[openocd.git] / src / target / cortex_a8.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * This program is free software; you can redistribute it and/or modify *
15 * it under the terms of the GNU General Public License as published by *
16 * the Free Software Foundation; either version 2 of the License, or *
17 * (at your option) any later version. *
18 * *
19 * This program is distributed in the hope that it will be useful, *
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
22 * GNU General Public License for more details. *
23 * *
24 * You should have received a copy of the GNU General Public License *
25 * along with this program; if not, write to the *
26 * Free Software Foundation, Inc., *
27 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
28 * *
29 * Cortex-A8(tm) TRM, ARM DDI 0344H *
30 * *
31 ***************************************************************************/
32 #ifdef HAVE_CONFIG_H
33 #include "config.h"
34 #endif
35
36 #include "breakpoints.h"
37 #include "cortex_a8.h"
38 #include "register.h"
39 #include "target_request.h"
40 #include "target_type.h"
41 #include "arm_opcodes.h"
42
43 static int cortex_a8_poll(struct target *target);
44 static int cortex_a8_debug_entry(struct target *target);
45 static int cortex_a8_restore_context(struct target *target, bool bpwp);
46 static int cortex_a8_set_breakpoint(struct target *target,
47 struct breakpoint *breakpoint, uint8_t matchmode);
48 static int cortex_a8_unset_breakpoint(struct target *target,
49 struct breakpoint *breakpoint);
50 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
51 uint32_t *value, int regnum);
52 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
53 uint32_t value, int regnum);
54 static int cortex_a8_mmu(struct target *target, int *enabled);
55 static int cortex_a8_virt2phys(struct target *target,
56 uint32_t virt, uint32_t *phys);
57 static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
58 int d_u_cache, int i_cache);
59 static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
60 int d_u_cache, int i_cache);
61 static uint32_t cortex_a8_get_ttb(struct target *target);
62
63
64 /*
65 * FIXME do topology discovery using the ROM; don't
66 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
67 * cores, with different AP numbering ... don't use a #define
68 * for these numbers, use per-core armv7a state.
69 */
70 #define swjdp_memoryap 0
71 #define swjdp_debugap 1
72 #define OMAP3530_DEBUG_BASE 0x54011000
73
74 /*
75 * Cortex-A8 basic debug access; very low level, assumes state is saved
76 */
77 static int cortex_a8_init_debug_access(struct target *target)
78 {
79 struct armv7a_common *armv7a = target_to_armv7a(target);
80 struct adiv5_dap *swjdp = &armv7a->dap;
81
82 int retval;
83 uint32_t dummy;
84
85 LOG_DEBUG(" ");
86
87 /* Unlocking the debug registers for modification */
88 /* The debugport might be uninitialised so try twice */
89 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
90 if (retval != ERROR_OK)
91 mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
92 /* Clear Sticky Power Down status Bit in PRSR to enable access to
93 the registers in the Core Power Domain */
94 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_PRSR, &dummy);
95 /* Enabling of instruction execution in debug mode is done in debug_entry code */
96
97 /* Resync breakpoint registers */
98
99 /* Since this is likely called from init or reset, update target state information */
100 cortex_a8_poll(target);
101
102 return retval;
103 }
104
105 /* To reduce needless round-trips, pass in a pointer to the current
106 * DSCR value. Initialize it to zero if you just need to know the
107 * value on return from this function; or DSCR_INSTR_COMP if you
108 * happen to know that no instruction is pending.
109 */
110 static int cortex_a8_exec_opcode(struct target *target,
111 uint32_t opcode, uint32_t *dscr_p)
112 {
113 uint32_t dscr;
114 int retval;
115 struct armv7a_common *armv7a = target_to_armv7a(target);
116 struct adiv5_dap *swjdp = &armv7a->dap;
117
118 dscr = dscr_p ? *dscr_p : 0;
119
120 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
121
122 /* Wait for InstrCompl bit to be set */
123 while ((dscr & DSCR_INSTR_COMP) == 0)
124 {
125 retval = mem_ap_read_atomic_u32(swjdp,
126 armv7a->debug_base + CPUDBG_DSCR, &dscr);
127 if (retval != ERROR_OK)
128 {
129 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
130 return retval;
131 }
132 }
133
134 mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);
135
136 do
137 {
138 retval = mem_ap_read_atomic_u32(swjdp,
139 armv7a->debug_base + CPUDBG_DSCR, &dscr);
140 if (retval != ERROR_OK)
141 {
142 LOG_ERROR("Could not read DSCR register");
143 return retval;
144 }
145 }
146 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
147
148 if (dscr_p)
149 *dscr_p = dscr;
150
151 return retval;
152 }
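/* Illustrative sketch (not part of this driver): chaining two
 * exec_opcode calls through one DSCR value, per the note above, so
 * only the first call has to poll DSCR for InstrCompl.  The opcodes
 * are the "MOV r0, r15" / "move r0 to DCC" pair also used by
 * cortex_a8_dap_read_coreregister_u32() further down; "target" stands
 * for any halted Cortex-A8 target.
 */
#if 0
	uint32_t dscr = 0;	/* unknown state: first call polls DSCR */

	cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);	/* MOV r0, r15 */
	cortex_a8_exec_opcode(target,
			ARMV4_5_MCR(14, 0, 0, 0, 5, 0), &dscr);	/* r0 -> DCC */
#endif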
153
154 /**************************************************************************
155 Read core registers with very few exec_opcode calls; fast, but needs a work_area.
156 This can cause problems with the MMU active.
157 **************************************************************************/
158 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
159 uint32_t * regfile)
160 {
161 int retval = ERROR_OK;
162 struct armv7a_common *armv7a = target_to_armv7a(target);
163 struct adiv5_dap *swjdp = &armv7a->dap;
164
165 cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
166 cortex_a8_dap_write_coreregister_u32(target, address, 0);
167 cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
168 dap_ap_select(swjdp, swjdp_memoryap);
169 mem_ap_read_buf_u32(swjdp, (uint8_t *)(&regfile[1]), 4*15, address);
170 dap_ap_select(swjdp, swjdp_debugap);
171
172 return retval;
173 }
174
175 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
176 uint32_t *value, int regnum)
177 {
178 int retval = ERROR_OK;
179 uint8_t reg = regnum&0xFF;
180 uint32_t dscr = 0;
181 struct armv7a_common *armv7a = target_to_armv7a(target);
182 struct adiv5_dap *swjdp = &armv7a->dap;
183
184 if (reg > 17)
185 return retval;
186
187 if (reg < 15)
188 {
189 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
190 cortex_a8_exec_opcode(target,
191 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
192 &dscr);
193 }
194 else if (reg == 15)
195 {
196 /* "MOV r0, r15"; then move r0 to DCCTX */
197 cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
198 cortex_a8_exec_opcode(target,
199 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
200 &dscr);
201 }
202 else
203 {
204 /* "MRS r0, CPSR" or "MRS r0, SPSR"
205 * then move r0 to DCCTX
206 */
207 cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
208 cortex_a8_exec_opcode(target,
209 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
210 &dscr);
211 }
212
213 /* Wait for DTRTXfull, then read DTRTX */
214 while ((dscr & DSCR_DTR_TX_FULL) == 0)
215 {
216 retval = mem_ap_read_atomic_u32(swjdp,
217 armv7a->debug_base + CPUDBG_DSCR, &dscr);
218 }
219
220 retval = mem_ap_read_atomic_u32(swjdp,
221 armv7a->debug_base + CPUDBG_DTRTX, value);
222 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
223
224 return retval;
225 }
226
227 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
228 uint32_t value, int regnum)
229 {
230 int retval = ERROR_OK;
231 uint8_t Rd = regnum&0xFF;
232 uint32_t dscr;
233 struct armv7a_common *armv7a = target_to_armv7a(target);
234 struct adiv5_dap *swjdp = &armv7a->dap;
235
236 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
237
238 /* Check that DCCRX is not full */
239 retval = mem_ap_read_atomic_u32(swjdp,
240 armv7a->debug_base + CPUDBG_DSCR, &dscr);
241 if (dscr & DSCR_DTR_RX_FULL)
242 {
243 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
244 /* Clear DCCRX with MCR(p14, 0, Rd, c0, c5, 0), opcode 0xEE000E15 */
245 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
246 &dscr);
247 }
248
249 if (Rd > 17)
250 return retval;
251
252 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
253 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
254 retval = mem_ap_write_u32(swjdp,
255 armv7a->debug_base + CPUDBG_DTRRX, value);
256
257 if (Rd < 15)
258 {
259 /* DCCRX to Rn, "MCR p14, 0, Rn, c0, c5, 0", 0xEE00nE15 */
260 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
261 &dscr);
262 }
263 else if (Rd == 15)
264 {
265 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15
266 * then "mov r15, r0"
267 */
268 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
269 &dscr);
270 cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
271 }
272 else
273 {
274 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15
275 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
276 */
277 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
278 &dscr);
279 cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
280 &dscr);
281
282 /* "Prefetch flush" after modifying execution status in CPSR */
283 if (Rd == 16)
284 cortex_a8_exec_opcode(target,
285 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
286 &dscr);
287 }
288
289 return retval;
290 }
291
292 /* Write to memory mapped registers directly with no cache or mmu handling */
293 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
294 {
295 int retval;
296 struct armv7a_common *armv7a = target_to_armv7a(target);
297 struct adiv5_dap *swjdp = &armv7a->dap;
298
299 retval = mem_ap_write_atomic_u32(swjdp, address, value);
300
301 return retval;
302 }
303
304 /*
305 * Cortex-A8 implementation of Debug Programmer's Model
306 *
307 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
308 * so there's no need to poll for it before executing an instruction.
309 *
310 * NOTE that in several of these cases the "stall" mode might be useful.
311 * It'd let us queue a few operations together... prepare/finish might
312 * be the places to enable/disable that mode.
313 */
314
315 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
316 {
317 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
318 }
319
320 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
321 {
322 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
323 return mem_ap_write_u32(&a8->armv7a_common.dap,
324 a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
325 }
326
327 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
328 uint32_t *dscr_p)
329 {
330 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
331 uint32_t dscr = DSCR_INSTR_COMP;
332 int retval;
333
334 if (dscr_p)
335 dscr = *dscr_p;
336
337 /* Wait for DTRTXfull */
338 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
339 retval = mem_ap_read_atomic_u32(swjdp,
340 a8->armv7a_common.debug_base + CPUDBG_DSCR,
341 &dscr);
342 }
343
344 retval = mem_ap_read_atomic_u32(swjdp,
345 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
346 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
347
348 if (dscr_p)
349 *dscr_p = dscr;
350
351 return retval;
352 }
353
354 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
355 {
356 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
357 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
358 uint32_t dscr;
359 int retval;
360
361 /* set up invariant: INSTR_COMP is set after every DPM operation */
362 do {
363 retval = mem_ap_read_atomic_u32(swjdp,
364 a8->armv7a_common.debug_base + CPUDBG_DSCR,
365 &dscr);
366 } while ((dscr & DSCR_INSTR_COMP) == 0);
367
368 /* this "should never happen" ... */
369 if (dscr & DSCR_DTR_RX_FULL) {
370 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
371 /* Clear DCCRX */
372 retval = cortex_a8_exec_opcode(
373 a8->armv7a_common.armv4_5_common.target,
374 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
375 &dscr);
376 }
377
378 return retval;
379 }
380
381 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
382 {
383 /* REVISIT what could be done here? */
384 return ERROR_OK;
385 }
386
387 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
388 uint32_t opcode, uint32_t data)
389 {
390 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
391 int retval;
392 uint32_t dscr = DSCR_INSTR_COMP;
393
394 retval = cortex_a8_write_dcc(a8, data);
395
396 return cortex_a8_exec_opcode(
397 a8->armv7a_common.armv4_5_common.target,
398 opcode,
399 &dscr);
400 }
401
402 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
403 uint32_t opcode, uint32_t data)
404 {
405 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
406 uint32_t dscr = DSCR_INSTR_COMP;
407 int retval;
408
409 retval = cortex_a8_write_dcc(a8, data);
410
411 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
412 retval = cortex_a8_exec_opcode(
413 a8->armv7a_common.armv4_5_common.target,
414 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
415 &dscr);
416
417 /* then the opcode, taking data from R0 */
418 retval = cortex_a8_exec_opcode(
419 a8->armv7a_common.armv4_5_common.target,
420 opcode,
421 &dscr);
422
423 return retval;
424 }
425
426 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
427 {
428 struct target *target = dpm->arm->target;
429 uint32_t dscr = DSCR_INSTR_COMP;
430
431 /* "Prefetch flush" after modifying execution status in CPSR */
432 return cortex_a8_exec_opcode(target,
433 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
434 &dscr);
435 }
436
437 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
438 uint32_t opcode, uint32_t *data)
439 {
440 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
441 int retval;
442 uint32_t dscr = DSCR_INSTR_COMP;
443
444 /* the opcode, writing data to DCC */
445 retval = cortex_a8_exec_opcode(
446 a8->armv7a_common.armv4_5_common.target,
447 opcode,
448 &dscr);
449
450 return cortex_a8_read_dcc(a8, data, &dscr);
451 }
452
453
454 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
455 uint32_t opcode, uint32_t *data)
456 {
457 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
458 uint32_t dscr = DSCR_INSTR_COMP;
459 int retval;
460
461 /* the opcode, writing data to R0 */
462 retval = cortex_a8_exec_opcode(
463 a8->armv7a_common.armv4_5_common.target,
464 opcode,
465 &dscr);
466
467 /* write R0 to DCC */
468 retval = cortex_a8_exec_opcode(
469 a8->armv7a_common.armv4_5_common.target,
470 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
471 &dscr);
472
473 return cortex_a8_read_dcc(a8, data, &dscr);
474 }
475
476 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index,
477 uint32_t addr, uint32_t control)
478 {
479 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
480 uint32_t vr = a8->armv7a_common.debug_base;
481 uint32_t cr = a8->armv7a_common.debug_base;
482 int retval;
483
484 switch (index) {
485 case 0 ... 15: /* breakpoints */
486 vr += CPUDBG_BVR_BASE;
487 cr += CPUDBG_BCR_BASE;
488 break;
489 case 16 ... 31: /* watchpoints */
490 vr += CPUDBG_WVR_BASE;
491 cr += CPUDBG_WCR_BASE;
492 index -= 16;
493 break;
494 default:
495 return ERROR_FAIL;
496 }
497 vr += 4 * index;
498 cr += 4 * index;
499
500 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
501 (unsigned) vr, (unsigned) cr);
502
503 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
504 vr, addr);
505 if (retval != ERROR_OK)
506 return retval;
507 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
508 cr, control);
509 return retval;
510 }
511
512 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index)
513 {
514 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
515 uint32_t cr;
516
517 switch (index) {
518 case 0 ... 15:
519 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
520 break;
521 case 16 ... 31:
522 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
523 index -= 16;
524 break;
525 default:
526 return ERROR_FAIL;
527 }
528 cr += 4 * index;
529
530 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
531
532 /* clear control register */
533 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
534 }
535
536 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
537 {
538 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
539 int retval;
540
541 dpm->arm = &a8->armv7a_common.armv4_5_common;
542 dpm->didr = didr;
543
544 dpm->prepare = cortex_a8_dpm_prepare;
545 dpm->finish = cortex_a8_dpm_finish;
546
547 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
548 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
549 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
550
551 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
552 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
553
554 dpm->bpwp_enable = cortex_a8_bpwp_enable;
555 dpm->bpwp_disable = cortex_a8_bpwp_disable;
556
557 retval = arm_dpm_setup(dpm);
558 if (retval == ERROR_OK)
559 retval = arm_dpm_initialize(dpm);
560
561 return retval;
562 }
563
564
565 /*
566 * Cortex-A8 Run control
567 */
568
569 static int cortex_a8_poll(struct target *target)
570 {
571 int retval = ERROR_OK;
572 uint32_t dscr;
573 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
574 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
575 struct adiv5_dap *swjdp = &armv7a->dap;
576 enum target_state prev_target_state = target->state;
577 uint8_t saved_apsel = dap_ap_get_select(swjdp);
578
579 dap_ap_select(swjdp, swjdp_debugap);
580 retval = mem_ap_read_atomic_u32(swjdp,
581 armv7a->debug_base + CPUDBG_DSCR, &dscr);
582 if (retval != ERROR_OK)
583 {
584 dap_ap_select(swjdp, saved_apsel);
585 return retval;
586 }
587 cortex_a8->cpudbg_dscr = dscr;
588
589 if ((dscr & 0x3) == 0x3)
590 {
591 if (prev_target_state != TARGET_HALTED)
592 {
593 /* We have a halting debug event */
594 LOG_DEBUG("Target halted");
595 target->state = TARGET_HALTED;
596 if ((prev_target_state == TARGET_RUNNING)
597 || (prev_target_state == TARGET_RESET))
598 {
599 retval = cortex_a8_debug_entry(target);
600 if (retval != ERROR_OK)
601 return retval;
602
603 target_call_event_callbacks(target,
604 TARGET_EVENT_HALTED);
605 }
606 if (prev_target_state == TARGET_DEBUG_RUNNING)
607 {
608 LOG_DEBUG(" ");
609
610 retval = cortex_a8_debug_entry(target);
611 if (retval != ERROR_OK)
612 return retval;
613
614 target_call_event_callbacks(target,
615 TARGET_EVENT_DEBUG_HALTED);
616 }
617 }
618 }
619 else if ((dscr & 0x3) == 0x2)
620 {
621 target->state = TARGET_RUNNING;
622 }
623 else
624 {
625 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
626 target->state = TARGET_UNKNOWN;
627 }
628
629 dap_ap_select(swjdp, saved_apsel);
630
631 return retval;
632 }
633
634 static int cortex_a8_halt(struct target *target)
635 {
636 int retval = ERROR_OK;
637 uint32_t dscr;
638 struct armv7a_common *armv7a = target_to_armv7a(target);
639 struct adiv5_dap *swjdp = &armv7a->dap;
640 uint8_t saved_apsel = dap_ap_get_select(swjdp);
641 dap_ap_select(swjdp, swjdp_debugap);
642
643 /*
644 * Tell the core to be halted by writing DRCR with 0x1
645 * and then wait for the core to be halted.
646 */
647 retval = mem_ap_write_atomic_u32(swjdp,
648 armv7a->debug_base + CPUDBG_DRCR, 0x1);
649
650 /*
651 * enter halting debug mode
652 */
653 mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
654 retval = mem_ap_write_atomic_u32(swjdp,
655 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
656
657 if (retval != ERROR_OK)
658 goto out;
659
660 do {
661 mem_ap_read_atomic_u32(swjdp,
662 armv7a->debug_base + CPUDBG_DSCR, &dscr);
663 } while ((dscr & DSCR_CORE_HALTED) == 0);
664
665 target->debug_reason = DBG_REASON_DBGRQ;
666
667 out:
668 dap_ap_select(swjdp, saved_apsel);
669 return retval;
670 }
671
672 static int cortex_a8_resume(struct target *target, int current,
673 uint32_t address, int handle_breakpoints, int debug_execution)
674 {
675 struct armv7a_common *armv7a = target_to_armv7a(target);
676 struct arm *armv4_5 = &armv7a->armv4_5_common;
677 struct adiv5_dap *swjdp = &armv7a->dap;
678
679 // struct breakpoint *breakpoint = NULL;
680 uint32_t resume_pc, dscr;
681
682 uint8_t saved_apsel = dap_ap_get_select(swjdp);
683 dap_ap_select(swjdp, swjdp_debugap);
684
685 if (!debug_execution)
686 target_free_all_working_areas(target);
687
688 #if 0
689 if (debug_execution)
690 {
691 /* Disable interrupts */
692 /* We disable interrupts in the PRIMASK register instead of
693 * masking with C_MASKINTS,
694 * This is probably the same issue as Cortex-M3 Errata 377493:
695 * C_MASKINTS in parallel with disabled interrupts can cause
696 * local faults to not be taken. */
697 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
698 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
699 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
700
701 /* Make sure we are in Thumb mode */
702 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
703 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
704 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
705 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
706 }
707 #endif
708
709 /* current = 1: continue on current pc, otherwise continue at <address> */
710 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
711 if (!current)
712 resume_pc = address;
713
714 /* Make sure that the ARMv7 gdb Thumb fixups do not
715 * kill the return address
716 */
717 switch (armv4_5->core_state)
718 {
719 case ARM_STATE_ARM:
720 resume_pc &= 0xFFFFFFFC;
721 break;
722 case ARM_STATE_THUMB:
723 case ARM_STATE_THUMB_EE:
724 /* When the return address is loaded into PC
725 * bit 0 must be 1 to stay in Thumb state
726 */
727 resume_pc |= 0x1;
728 break;
729 case ARM_STATE_JAZELLE:
730 LOG_ERROR("How do I resume into Jazelle state??");
731 return ERROR_FAIL;
732 }
733 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
734 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
735 armv4_5->pc->dirty = 1;
736 armv4_5->pc->valid = 1;
737
738 cortex_a8_restore_context(target, handle_breakpoints);
739
740 #if 0
741 /* the front-end may request us not to handle breakpoints */
742 if (handle_breakpoints)
743 {
744 /* Single step past breakpoint at current address */
745 if ((breakpoint = breakpoint_find(target, resume_pc)))
746 {
747 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
748 cortex_m3_unset_breakpoint(target, breakpoint);
749 cortex_m3_single_step_core(target);
750 cortex_m3_set_breakpoint(target, breakpoint);
751 }
752 }
753
754 #endif
755 /* Restart core and wait for it to be started
756 * NOTE: this clears DSCR_ITR_EN and other bits.
757 *
758 * REVISIT: for single stepping, we probably want to
759 * disable IRQs by default, with optional override...
760 */
761 mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR, 0x2);
762
763 do {
764 mem_ap_read_atomic_u32(swjdp,
765 armv7a->debug_base + CPUDBG_DSCR, &dscr);
766 } while ((dscr & DSCR_CORE_RESTARTED) == 0);
767
768 target->debug_reason = DBG_REASON_NOTHALTED;
769 target->state = TARGET_RUNNING;
770
771 /* registers are now invalid */
772 register_cache_invalidate(armv4_5->core_cache);
773
774 if (!debug_execution)
775 {
776 target->state = TARGET_RUNNING;
777 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
778 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
779 }
780 else
781 {
782 target->state = TARGET_DEBUG_RUNNING;
783 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
784 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
785 }
786
787 dap_ap_select(swjdp, saved_apsel);
788
789 return ERROR_OK;
790 }
791
792 static int cortex_a8_debug_entry(struct target *target)
793 {
794 int i;
795 uint32_t regfile[16], cpsr, dscr;
796 int retval = ERROR_OK;
797 struct working_area *regfile_working_area = NULL;
798 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
799 struct armv7a_common *armv7a = target_to_armv7a(target);
800 struct arm *armv4_5 = &armv7a->armv4_5_common;
801 struct adiv5_dap *swjdp = &armv7a->dap;
802 struct reg *reg;
803
804 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
805
806 /* REVISIT surely we should not re-read DSCR !! */
807 mem_ap_read_atomic_u32(swjdp,
808 armv7a->debug_base + CPUDBG_DSCR, &dscr);
809
810 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
811 * imprecise data aborts get discarded by issuing a Data
812 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
813 */
814
815 /* Enable the ITR execution once we are in debug mode */
816 dscr |= DSCR_ITR_EN;
817 retval = mem_ap_write_atomic_u32(swjdp,
818 armv7a->debug_base + CPUDBG_DSCR, dscr);
819
820 /* Examine debug reason */
821 arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);
822
823 /* save address of instruction that triggered the watchpoint? */
824 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
825 uint32_t wfar;
826
827 retval = mem_ap_read_atomic_u32(swjdp,
828 armv7a->debug_base + CPUDBG_WFAR,
829 &wfar);
830 arm_dpm_report_wfar(&armv7a->dpm, wfar);
831 }
832
833 /* REVISIT fast_reg_read is never set ... */
834
835 /* Examine target state and mode */
836 if (cortex_a8->fast_reg_read)
837 target_alloc_working_area(target, 64, &regfile_working_area);
838
839 /* First load registers accessible through the core debug port */
840 if (!regfile_working_area)
841 {
842 retval = arm_dpm_read_current_registers(&armv7a->dpm);
843 }
844 else
845 {
846 dap_ap_select(swjdp, swjdp_memoryap);
847 cortex_a8_read_regs_through_mem(target,
848 regfile_working_area->address, regfile);
849 dap_ap_select(swjdp, swjdp_memoryap);
850 target_free_working_area(target, regfile_working_area);
851
852 /* read Current PSR */
853 cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
854 dap_ap_select(swjdp, swjdp_debugap);
855 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
856
857 arm_set_cpsr(armv4_5, cpsr);
858
859 /* update cache */
860 for (i = 0; i <= ARM_PC; i++)
861 {
862 reg = arm_reg_current(armv4_5, i);
863
864 buf_set_u32(reg->value, 0, 32, regfile[i]);
865 reg->valid = 1;
866 reg->dirty = 0;
867 }
868
869 /* Fixup PC Resume Address */
870 if (cpsr & (1 << 5))
871 {
872 // T bit set for Thumb or ThumbEE state
873 regfile[ARM_PC] -= 4;
874 }
875 else
876 {
877 // ARM state
878 regfile[ARM_PC] -= 8;
879 }
880
881 reg = armv4_5->pc;
882 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
883 reg->dirty = reg->valid;
884 }
885
886 #if 0
887 /* TODO, Move this */
888 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
889 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
890 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
891
892 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
893 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
894
895 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
896 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
897 #endif
898
899 /* Are we in an exception handler */
900 // armv4_5->exception_number = 0;
901 if (armv7a->post_debug_entry)
902 armv7a->post_debug_entry(target);
903
904 return retval;
905 }
906
907 static void cortex_a8_post_debug_entry(struct target *target)
908 {
909 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
910 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
911 int retval;
912
913 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
914 retval = armv7a->armv4_5_common.mrc(target, 15,
915 0, 0, /* op1, op2 */
916 1, 0, /* CRn, CRm */
917 &cortex_a8->cp15_control_reg);
918 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
919
920 if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
921 {
922 uint32_t cache_type_reg;
923
924 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
925 retval = armv7a->armv4_5_common.mrc(target, 15,
926 0, 1, /* op1, op2 */
927 0, 0, /* CRn, CRm */
928 &cache_type_reg);
929 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);
930
931 /* FIXME the armv4_5 cache info DOES NOT APPLY to Cortex-A8 */
932 armv4_5_identify_cache(cache_type_reg,
933 &armv7a->armv4_5_mmu.armv4_5_cache);
934 }
935
936 armv7a->armv4_5_mmu.mmu_enabled =
937 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
938 armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
939 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
940 armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
941 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
942
943
944 }
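/* Worked example (illustrative only, the value is hypothetical): with
 * cp15_control_reg == 0x00C5187D the bits tested above decode as
 * bit 0 (M) = 1 -> MMU enabled, bit 2 (C) = 1 -> data/unified cache
 * enabled, bit 12 (I) = 1 -> instruction cache enabled.
 */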
945
946 static int cortex_a8_step(struct target *target, int current, uint32_t address,
947 int handle_breakpoints)
948 {
949 struct armv7a_common *armv7a = target_to_armv7a(target);
950 struct arm *armv4_5 = &armv7a->armv4_5_common;
951 struct breakpoint *breakpoint = NULL;
952 struct breakpoint stepbreakpoint;
953 struct reg *r;
954
955 int timeout = 100;
956
957 if (target->state != TARGET_HALTED)
958 {
959 LOG_WARNING("target not halted");
960 return ERROR_TARGET_NOT_HALTED;
961 }
962
963 /* current = 1: continue on current pc, otherwise continue at <address> */
964 r = armv4_5->pc;
965 if (!current)
966 {
967 buf_set_u32(r->value, 0, 32, address);
968 }
969 else
970 {
971 address = buf_get_u32(r->value, 0, 32);
972 }
973
974 /* The front-end may request us not to handle breakpoints.
975 * But since Cortex-A8 uses a breakpoint to implement single step,
976 * we MUST handle breakpoints.
977 */
978 handle_breakpoints = 1;
979 if (handle_breakpoints) {
980 breakpoint = breakpoint_find(target, address);
981 if (breakpoint)
982 cortex_a8_unset_breakpoint(target, breakpoint);
983 }
984
985 /* Setup single step breakpoint */
986 stepbreakpoint.address = address;
987 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
988 ? 2 : 4;
989 stepbreakpoint.type = BKPT_HARD;
990 stepbreakpoint.set = 0;
991
992 /* Break on IVA mismatch */
993 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
994
995 target->debug_reason = DBG_REASON_SINGLESTEP;
996
997 cortex_a8_resume(target, 1, address, 0, 0);
998
999 while (target->state != TARGET_HALTED)
1000 {
1001 cortex_a8_poll(target);
1002 if (--timeout == 0)
1003 {
1004 LOG_WARNING("timeout waiting for target halt");
1005 break;
1006 }
1007 }
1008
1009 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1010 if (timeout > 0)
1011 target->debug_reason = DBG_REASON_BREAKPOINT;
1012
1013 if (breakpoint)
1014 cortex_a8_set_breakpoint(target, breakpoint, 0);
1015
1016 if (target->state == TARGET_HALTED)
1017 LOG_DEBUG("target stepped");
1018
1019 return ERROR_OK;
1020 }
1021
1022 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1023 {
1024 struct armv7a_common *armv7a = target_to_armv7a(target);
1025
1026 LOG_DEBUG(" ");
1027
1028 if (armv7a->pre_restore_context)
1029 armv7a->pre_restore_context(target);
1030
1031 arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1032
1033 return ERROR_OK;
1034 }
1035
1036
1037 /*
1038 * Cortex-A8 Breakpoint and watchpoint functions
1039 */
1040
1041 /* Setup hardware Breakpoint Register Pair */
1042 static int cortex_a8_set_breakpoint(struct target *target,
1043 struct breakpoint *breakpoint, uint8_t matchmode)
1044 {
1045 int retval;
1046 int brp_i=0;
1047 uint32_t control;
1048 uint8_t byte_addr_select = 0x0F;
1049 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1050 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1051 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1052
1053 if (breakpoint->set)
1054 {
1055 LOG_WARNING("breakpoint already set");
1056 return ERROR_OK;
1057 }
1058
1059 if (breakpoint->type == BKPT_HARD)
1060 {
1061 while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
1062 brp_i++ ;
1063 if (brp_i >= cortex_a8->brp_num)
1064 {
1065 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1066 return ERROR_FAIL;
1067 }
1068 breakpoint->set = brp_i + 1;
1069 if (breakpoint->length == 2)
1070 {
1071 byte_addr_select = (3 << (breakpoint->address & 0x02));
1072 }
1073 control = ((matchmode & 0x7) << 20)
1074 | (byte_addr_select << 5)
1075 | (3 << 1) | 1;
1076 brp_list[brp_i].used = 1;
1077 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1078 brp_list[brp_i].control = control;
1079 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1080 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1081 brp_list[brp_i].value);
1082 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1083 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1084 brp_list[brp_i].control);
1085 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1086 brp_list[brp_i].control,
1087 brp_list[brp_i].value);
1088 }
1089 else if (breakpoint->type == BKPT_SOFT)
1090 {
1091 uint8_t code[4];
1092 if (breakpoint->length == 2)
1093 {
1094 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1095 }
1096 else
1097 {
1098 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1099 }
1100 retval = target->type->read_memory(target,
1101 breakpoint->address & 0xFFFFFFFE,
1102 breakpoint->length, 1,
1103 breakpoint->orig_instr);
1104 if (retval != ERROR_OK)
1105 return retval;
1106 retval = target->type->write_memory(target,
1107 breakpoint->address & 0xFFFFFFFE,
1108 breakpoint->length, 1, code);
1109 if (retval != ERROR_OK)
1110 return retval;
1111 breakpoint->set = 0x11; /* Any nice value but 0 */
1112 }
1113
1114 return ERROR_OK;
1115 }
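/* Worked example (illustrative only): for a 4-byte BKPT_HARD breakpoint
 * with exact matching (matchmode 0), the control word built above is
 * (0x0 << 20) | (0x0F << 5) | (3 << 1) | 1 = 0x1E7 -- all four byte
 * address select bits set, both privilege bits set, breakpoint enabled.
 */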
1116
1117 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1118 {
1119 int retval;
1120 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1121 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1122 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1123
1124 if (!breakpoint->set)
1125 {
1126 LOG_WARNING("breakpoint not set");
1127 return ERROR_OK;
1128 }
1129
1130 if (breakpoint->type == BKPT_HARD)
1131 {
1132 int brp_i = breakpoint->set - 1;
1133 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1134 {
1135 LOG_DEBUG("Invalid BRP number in breakpoint");
1136 return ERROR_OK;
1137 }
1138 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1139 brp_list[brp_i].control, brp_list[brp_i].value);
1140 brp_list[brp_i].used = 0;
1141 brp_list[brp_i].value = 0;
1142 brp_list[brp_i].control = 0;
1143 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1144 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1145 brp_list[brp_i].control);
1146 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1147 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1148 brp_list[brp_i].value);
1149 }
1150 else
1151 {
1152 /* restore original instruction (kept in target endianness) */
1153 if (breakpoint->length == 4)
1154 {
1155 retval = target->type->write_memory(target,
1156 breakpoint->address & 0xFFFFFFFE,
1157 4, 1, breakpoint->orig_instr);
1158 if (retval != ERROR_OK)
1159 return retval;
1160 }
1161 else
1162 {
1163 retval = target->type->write_memory(target,
1164 breakpoint->address & 0xFFFFFFFE,
1165 2, 1, breakpoint->orig_instr);
1166 if (retval != ERROR_OK)
1167 return retval;
1168 }
1169 }
1170 breakpoint->set = 0;
1171
1172 return ERROR_OK;
1173 }
1174
1175 static int cortex_a8_add_breakpoint(struct target *target,
1176 struct breakpoint *breakpoint)
1177 {
1178 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1179
1180 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1181 {
1182 LOG_INFO("no hardware breakpoint available");
1183 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1184 }
1185
1186 if (breakpoint->type == BKPT_HARD)
1187 cortex_a8->brp_num_available--;
1188 cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1189
1190 return ERROR_OK;
1191 }
1192
1193 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1194 {
1195 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1196
1197 #if 0
1198 /* It is perfectly possible to remove breakpoints while the target is running */
1199 if (target->state != TARGET_HALTED)
1200 {
1201 LOG_WARNING("target not halted");
1202 return ERROR_TARGET_NOT_HALTED;
1203 }
1204 #endif
1205
1206 if (breakpoint->set)
1207 {
1208 cortex_a8_unset_breakpoint(target, breakpoint);
1209 if (breakpoint->type == BKPT_HARD)
1210 cortex_a8->brp_num_available++ ;
1211 }
1212
1213
1214 return ERROR_OK;
1215 }
1216
1217
1218
1219 /*
1220 * Cortex-A8 Reset functions
1221 */
1222
1223 static int cortex_a8_assert_reset(struct target *target)
1224 {
1225 struct armv7a_common *armv7a = target_to_armv7a(target);
1226
1227 LOG_DEBUG(" ");
1228
1229 /* FIXME when halt is requested, make it work somehow... */
1230
1231 /* Issue some kind of warm reset. */
1232 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1233 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1234 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1235 /* REVISIT handle "pulls" cases, if there's
1236 * hardware that needs them to work.
1237 */
1238 jtag_add_reset(0, 1);
1239 } else {
1240 LOG_ERROR("%s: how to reset?", target_name(target));
1241 return ERROR_FAIL;
1242 }
1243
1244 /* registers are now invalid */
1245 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1246
1247 target->state = TARGET_RESET;
1248
1249 return ERROR_OK;
1250 }
1251
1252 static int cortex_a8_deassert_reset(struct target *target)
1253 {
1254 int retval;
1255
1256 LOG_DEBUG(" ");
1257
1258 /* be certain SRST is off */
1259 jtag_add_reset(0, 0);
1260
1261 retval = cortex_a8_poll(target);
1262
1263 if (target->reset_halt) {
1264 if (target->state != TARGET_HALTED) {
1265 LOG_WARNING("%s: ran after reset and before halt ...",
1266 target_name(target));
1267 if ((retval = target_halt(target)) != ERROR_OK)
1268 return retval;
1269 }
1270 }
1271
1272 return ERROR_OK;
1273 }
1274
1275 /*
1276 * Cortex-A8 Memory access
1277 *
1278 * This is the same as for Cortex-M3, but we must also use the correct
1279 * AP number for every access.
1280 */
1281
1282 static int cortex_a8_read_phys_memory(struct target *target,
1283 uint32_t address, uint32_t size,
1284 uint32_t count, uint8_t *buffer)
1285 {
1286 struct armv7a_common *armv7a = target_to_armv7a(target);
1287 struct adiv5_dap *swjdp = &armv7a->dap;
1288 int retval = ERROR_INVALID_ARGUMENTS;
1289
1290 /* cortex_a8 handles unaligned memory access */
1291
1292 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1293 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);
1294 if (count && buffer) {
1295 switch (size) {
1296 case 4:
1297 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1298 break;
1299 case 2:
1300 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1301 break;
1302 case 1:
1303 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1304 break;
1305 }
1306 }
1307
1308 return retval;
1309 }
1310
1311 static int cortex_a8_read_memory(struct target *target, uint32_t address,
1312 uint32_t size, uint32_t count, uint8_t *buffer)
1313 {
1314 int enabled = 0;
1315 uint32_t virt, phys;
1316
1317 /* cortex_a8 handles unaligned memory access */
1318
1319 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1320 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
1321 cortex_a8_mmu(target, &enabled);
1322 if(enabled)
1323 {
1324 virt = address;
1325 cortex_a8_virt2phys(target, virt, &phys);
1326 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1327 address = phys;
1328 }
1329
1330 return cortex_a8_read_phys_memory(target, address, size, count, buffer);
1331 }
1332
1333 static int cortex_a8_write_phys_memory(struct target *target,
1334 uint32_t address, uint32_t size,
1335 uint32_t count, uint8_t *buffer)
1336 {
1337 struct armv7a_common *armv7a = target_to_armv7a(target);
1338 struct adiv5_dap *swjdp = &armv7a->dap;
1339 int retval = ERROR_INVALID_ARGUMENTS;
1340
1341 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1342
1343 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);
1344 if (count && buffer) {
1345 switch (size) {
1346 case 4:
1347 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
1348 break;
1349 case 2:
1350 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1351 break;
1352 case 1:
1353 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1354 break;
1355 }
1356 }
1357
1358 /* REVISIT this op is generic ARMv7-A/R stuff */
1359 if (retval == ERROR_OK && target->state == TARGET_HALTED)
1360 {
1361 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
1362
1363 retval = dpm->prepare(dpm);
1364 if (retval != ERROR_OK)
1365 return retval;
1366
1367 /* The Cache handling will NOT work with MMU active, the
1368 * wrong addresses will be invalidated!
1369 *
1370 * For both ICache and DCache, walk all cache lines in the
1371 * address range. Cortex-A8 has fixed 64 byte line length.
1372 *
1373 * REVISIT per ARMv7, these may trigger watchpoints ...
1374 */
1375
1376 /* invalidate I-Cache */
1377 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
1378 {
1379 /* ICIMVAU - Invalidate Cache single entry
1380 * with MVA to PoU
1381 * MCR p15, 0, r0, c7, c5, 1
1382 */
1383 for (uint32_t cacheline = address;
1384 cacheline < address + size * count;
1385 cacheline += 64) {
1386 retval = dpm->instr_write_data_r0(dpm,
1387 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1388 cacheline);
1389 }
1390 }
1391
1392 /* invalidate D-Cache */
1393 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
1394 {
1395 /* DCIMVAC - Invalidate data Cache line
1396 * with MVA to PoC
1397 * MCR p15, 0, r0, c7, c6, 1
1398 */
1399 for (uint32_t cacheline = address;
1400 cacheline < address + size * count;
1401 cacheline += 64) {
1402 retval = dpm->instr_write_data_r0(dpm,
1403 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1404 cacheline);
1405 }
1406 }
1407
1408 /* (void) */ dpm->finish(dpm);
1409 }
1410
1411 return retval;
1412 }
1413
1414 static int cortex_a8_write_memory(struct target *target, uint32_t address,
1415 uint32_t size, uint32_t count, uint8_t *buffer)
1416 {
1417 int enabled = 0;
1418 uint32_t virt, phys;
1419
1420 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1421
1422 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1423 cortex_a8_mmu(target, &enabled);
1424 if(enabled)
1425 {
1426 virt = address;
1427 cortex_a8_virt2phys(target, virt, &phys);
1428 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1429 address = phys;
1430 }
1431
1432 return cortex_a8_write_phys_memory(target, address, size,
1433 count, buffer);
1434 }
1435
1436 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
1437 uint32_t count, uint8_t *buffer)
1438 {
1439 return cortex_a8_write_memory(target, address, 4, count, buffer);
1440 }
1441
1442
1443 static int cortex_a8_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1444 {
1445 #if 0
1446 u16 dcrdr;
1447
1448 mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1449 *ctrl = (uint8_t)dcrdr;
1450 *value = (uint8_t)(dcrdr >> 8);
1451
1452 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1453
1454 /* write ack back to software dcc register
1455 * signify we have read data */
1456 if (dcrdr & (1 << 0))
1457 {
1458 dcrdr = 0;
1459 mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1460 }
1461 #endif
1462 return ERROR_OK;
1463 }
1464
1465
1466 static int cortex_a8_handle_target_request(void *priv)
1467 {
1468 struct target *target = priv;
1469 struct armv7a_common *armv7a = target_to_armv7a(target);
1470 struct adiv5_dap *swjdp = &armv7a->dap;
1471
1472 if (!target_was_examined(target))
1473 return ERROR_OK;
1474 if (!target->dbg_msg_enabled)
1475 return ERROR_OK;
1476
1477 if (target->state == TARGET_RUNNING)
1478 {
1479 uint8_t data = 0;
1480 uint8_t ctrl = 0;
1481
1482 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1483
1484 /* check if we have data */
1485 if (ctrl & (1 << 0))
1486 {
1487 uint32_t request;
1488
1489 /* we assume target is quick enough */
1490 request = data;
1491 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1492 request |= (data << 8);
1493 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1494 request |= (data << 16);
1495 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1496 request |= (data << 24);
1497 target_request(target, request);
1498 }
1499 }
1500
1501 return ERROR_OK;
1502 }
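/* Worked example (illustrative only): if the four successive DCC byte
 * reads above returned 0x01, 0x02, 0x03 and 0x04, the assembled request
 * word would be 0x04030201 (least significant byte first).
 */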
1503
1504 /*
1505 * Cortex-A8 target information and configuration
1506 */
1507
1508 static int cortex_a8_examine_first(struct target *target)
1509 {
1510 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1511 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1512 struct adiv5_dap *swjdp = &armv7a->dap;
1513 int i;
1514 int retval = ERROR_OK;
1515 uint32_t didr, ctypr, ttypr, cpuid;
1516
1517 /* stop assuming this is an OMAP! */
1518 LOG_DEBUG("TODO - autoconfigure");
1519
1520 /* Here we shall insert a proper ROM Table scan */
1521 armv7a->debug_base = OMAP3530_DEBUG_BASE;
1522
1523 /* To ensure the DAP is configured, we call ahbap_debugport_init(swjdp)
1524 * and then do one extra (discarded) read of CPUID.
1525 */
1526 ahbap_debugport_init(swjdp);
1527 mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1528 if ((retval = mem_ap_read_atomic_u32(swjdp,
1529 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1530 {
1531 LOG_DEBUG("Examine %s failed", "CPUID");
1532 return retval;
1533 }
1534
1535 if ((retval = mem_ap_read_atomic_u32(swjdp,
1536 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1537 {
1538 LOG_DEBUG("Examine %s failed", "CTYPR");
1539 return retval;
1540 }
1541
1542 if ((retval = mem_ap_read_atomic_u32(swjdp,
1543 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1544 {
1545 LOG_DEBUG("Examine %s failed", "TTYPR");
1546 return retval;
1547 }
1548
1549 if ((retval = mem_ap_read_atomic_u32(swjdp,
1550 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1551 {
1552 LOG_DEBUG("Examine %s failed", "DIDR");
1553 return retval;
1554 }
1555
1556 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1557 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1558 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1559 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1560
1561 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
1562 cortex_a8_dpm_setup(cortex_a8, didr);
1563
1564 /* Setup Breakpoint Register Pairs */
1565 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
1566 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1567 cortex_a8->brp_num_available = cortex_a8->brp_num;
1568 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
1569 // cortex_a8->brb_enabled = ????;
1570 for (i = 0; i < cortex_a8->brp_num; i++)
1571 {
1572 cortex_a8->brp_list[i].used = 0;
1573 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
1574 cortex_a8->brp_list[i].type = BRP_NORMAL;
1575 else
1576 cortex_a8->brp_list[i].type = BRP_CONTEXT;
1577 cortex_a8->brp_list[i].value = 0;
1578 cortex_a8->brp_list[i].control = 0;
1579 cortex_a8->brp_list[i].BRPn = i;
1580 }
1581
1582 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
1583
1584 target_set_examined(target);
1585 return ERROR_OK;
1586 }
1587
1588 static int cortex_a8_examine(struct target *target)
1589 {
1590 int retval = ERROR_OK;
1591
1592 /* don't re-probe hardware after each reset */
1593 if (!target_was_examined(target))
1594 retval = cortex_a8_examine_first(target);
1595
1596 /* Configure core debug access */
1597 if (retval == ERROR_OK)
1598 retval = cortex_a8_init_debug_access(target);
1599
1600 return retval;
1601 }
1602
1603 /*
1604 * Cortex-A8 target creation and initialization
1605 */
1606
1607 static int cortex_a8_init_target(struct command_context *cmd_ctx,
1608 struct target *target)
1609 {
1610 /* examine_first() does a bunch of this */
1611 return ERROR_OK;
1612 }
1613
1614 static int cortex_a8_init_arch_info(struct target *target,
1615 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
1616 {
1617 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1618 struct arm *armv4_5 = &armv7a->armv4_5_common;
1619 struct adiv5_dap *dap = &armv7a->dap;
1620
1621 armv7a->armv4_5_common.dap = dap;
1622
1623 /* Setup struct cortex_a8_common */
1624 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
1625 armv4_5->arch_info = armv7a;
1626
1627 /* prepare JTAG information for the new target */
1628 cortex_a8->jtag_info.tap = tap;
1629 cortex_a8->jtag_info.scann_size = 4;
1630
1631 /* Leave (only) generic DAP stuff for debugport_init() */
1632 dap->jtag_info = &cortex_a8->jtag_info;
1633 dap->memaccess_tck = 80;
1634
1635 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
1636 dap->tar_autoincr_block = (1 << 10);
1637
1638 cortex_a8->fast_reg_read = 0;
1639
1640 /* Set default value */
1641 cortex_a8->current_address_mode = ARM_MODE_ANY;
1642
1643 /* register arch-specific functions */
1644 armv7a->examine_debug_reason = NULL;
1645
1646 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
1647
1648 armv7a->pre_restore_context = NULL;
1649 armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
1650 armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
1651 armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
1652 armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
1653 armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
1654 armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
1655 armv7a->armv4_5_mmu.has_tiny_pages = 1;
1656 armv7a->armv4_5_mmu.mmu_enabled = 0;
1657
1658
1659 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
1660
1661 /* REVISIT v7a setup should be in a v7a-specific routine */
1662 arm_init_arch_info(target, armv4_5);
1663 armv7a->common_magic = ARMV7_COMMON_MAGIC;
1664
1665 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
1666
1667 return ERROR_OK;
1668 }
1669
1670 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
1671 {
1672 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
1673
1674 cortex_a8_init_arch_info(target, cortex_a8, target->tap);
1675
1676 return ERROR_OK;
1677 }
1678
1679 static uint32_t cortex_a8_get_ttb(struct target *target)
1680 {
1681 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1682 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1683 uint32_t ttb = 0, retval = ERROR_OK;
1684
1685 /* current_address_mode is set inside cortex_a8_virt2phys()
1686 where we can determine if the address belongs to user or kernel space */
1687 if(cortex_a8->current_address_mode == ARM_MODE_SVC)
1688 {
1689 /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 (TTBR1) */
1690 retval = armv7a->armv4_5_common.mrc(target, 15,
1691 0, 1, /* op1, op2 */
1692 2, 0, /* CRn, CRm */
1693 &ttb);
1694 }
1695 else if(cortex_a8->current_address_mode == ARM_MODE_USR)
1696 {
1697 /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 (TTBR0) */
1698 retval = armv7a->armv4_5_common.mrc(target, 15,
1699 0, 0, /* op1, op2 */
1700 2, 0, /* CRn, CRm */
1701 &ttb);
1702 }
1703 /* We don't know whether the address belongs to user or kernel space.
1704 We assume that if we are in kernel (SVC) mode then the address
1705 belongs to kernel space, and if we are in user mode it belongs
1706 to user space. */
1707 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
1708 {
1709 /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 (TTBR1) */
1710 retval = armv7a->armv4_5_common.mrc(target, 15,
1711 0, 1, /* op1, op2 */
1712 2, 0, /* CRn, CRm */
1713 &ttb);
1714 }
1715 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
1716 {
1717 /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 (TTBR0) */
1718 retval = armv7a->armv4_5_common.mrc(target, 15,
1719 0, 0, /* op1, op2 */
1720 2, 0, /* CRn, CRm */
1721 &ttb);
1722 }
1723 /* finally, we don't know which TTB to use: user or kernel */
1724 else
1725 LOG_ERROR("Don't know how to get ttb for current mode!!!");
1726
1727 ttb &= 0xffffc000;
1728
1729 return ttb;
1730 }
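/* Worked example (illustrative only, the value is hypothetical): if the
 * MRC above returns 0x8000405A, masking with 0xffffc000 leaves a
 * translation table base of 0x80004000; the low TTBR bits hold
 * cacheability/shareability attributes rather than address bits.
 */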
1731
1732 static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
1733 int d_u_cache, int i_cache)
1734 {
1735 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1736 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1737 uint32_t cp15_control;
1738
1739 /* read cp15 control register */
1740 armv7a->armv4_5_common.mrc(target, 15,
1741 0, 0, /* op1, op2 */
1742 1, 0, /* CRn, CRm */
1743 &cp15_control);
1744
1745
1746 if (mmu)
1747 cp15_control &= ~0x1U;
1748
1749 if (d_u_cache)
1750 cp15_control &= ~0x4U;
1751
1752 if (i_cache)
1753 cp15_control &= ~0x1000U;
1754
1755 armv7a->armv4_5_common.mcr(target, 15,
1756 0, 0, /* op1, op2 */
1757 1, 0, /* CRn, CRm */
1758 cp15_control);
1759 }
1760
1761 static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
1762 int d_u_cache, int i_cache)
1763 {
1764 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1765 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1766 uint32_t cp15_control;
1767
1768 /* read cp15 control register */
1769 armv7a->armv4_5_common.mrc(target, 15,
1770 0, 0, /* op1, op2 */
1771 1, 0, /* CRn, CRm */
1772 &cp15_control);
1773
1774 if (mmu)
1775 cp15_control |= 0x1U;
1776
1777 if (d_u_cache)
1778 cp15_control |= 0x4U;
1779
1780 if (i_cache)
1781 cp15_control |= 0x1000U;
1782
1783 armv7a->armv4_5_common.mcr(target, 15,
1784 0, 0, /* op1, op2 */
1785 1, 0, /* CRn, CRm */
1786 cp15_control);
1787 }
1788
1789
1790 static int cortex_a8_mmu(struct target *target, int *enabled)
1791 {
1792 if (target->state != TARGET_HALTED) {
1793 LOG_ERROR("%s: target not halted", __func__);
1794 return ERROR_TARGET_INVALID;
1795 }
1796
1797 *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
1798 return ERROR_OK;
1799 }
1800
1801 static int cortex_a8_virt2phys(struct target *target,
1802 uint32_t virt, uint32_t *phys)
1803 {
1804 int type;
1805 uint32_t cb;
1806 int domain;
1807 uint32_t ap;
1808 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1809 // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1810 struct armv7a_common *armv7a = target_to_armv7a(target);
1811
1812 /* We assume that the virtual address space is split
1813 between user and kernel in the Linux style:
1814 0x00000000-0xbfffffff - User space
1815 0xc0000000-0xffffffff - Kernel space */
1816 if( virt < 0xc0000000 ) /* Linux user space */
1817 cortex_a8->current_address_mode = ARM_MODE_USR;
1818 else /* Linux kernel */
1819 cortex_a8->current_address_mode = ARM_MODE_SVC;
1820 uint32_t ret;
1821 int retval = armv4_5_mmu_translate_va(target,
1822 &armv7a->armv4_5_mmu, virt, &type, &cb, &domain, &ap, &ret);
1823 if (retval != ERROR_OK)
1824 return retval;
1825 /* Reset the flag. We don't want someone else to use it by mistake */
1826 cortex_a8->current_address_mode = ARM_MODE_ANY;
1827
1828 if (type == -1)
1829 {
1830 return ret;
1831 }
1832 *phys = ret;
1833 return ERROR_OK;
1834 }
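/* Illustrative sketch (not part of this driver): with the error
 * propagation added above, a hypothetical caller can abort cleanly when
 * the table walk fails instead of using a bogus physical address.
 * "virt" and "buffer" are placeholders.
 */
#if 0
	uint32_t phys;
	int retval = cortex_a8_virt2phys(target, virt, &phys);
	if (retval != ERROR_OK)
		return retval;	/* translation error from the MMU table walk */
	retval = cortex_a8_read_phys_memory(target, phys, 4, 1, buffer);
#endif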
1835
1836 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
1837 {
1838 struct target *target = get_current_target(CMD_CTX);
1839 struct armv7a_common *armv7a = target_to_armv7a(target);
1840
1841 return armv4_5_handle_cache_info_command(CMD_CTX,
1842 &armv7a->armv4_5_mmu.armv4_5_cache);
1843 }
1844
1845
1846 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
1847 {
1848 struct target *target = get_current_target(CMD_CTX);
1849
1850 cortex_a8_init_debug_access(target);
1851
1852 return ERROR_OK;
1853 }
1854
1855 static const struct command_registration cortex_a8_exec_command_handlers[] = {
1856 {
1857 .name = "cache_info",
1858 .handler = cortex_a8_handle_cache_info_command,
1859 .mode = COMMAND_EXEC,
1860 .help = "display information about target caches",
1861 },
1862 {
1863 .name = "dbginit",
1864 .handler = cortex_a8_handle_dbginit_command,
1865 .mode = COMMAND_EXEC,
1866 .help = "Initialize core debug",
1867 },
1868 COMMAND_REGISTRATION_DONE
1869 };
1870 static const struct command_registration cortex_a8_command_handlers[] = {
1871 {
1872 .chain = arm_command_handlers,
1873 },
1874 {
1875 .chain = armv7a_command_handlers,
1876 },
1877 {
1878 .name = "cortex_a8",
1879 .mode = COMMAND_ANY,
1880 .help = "Cortex-A8 command group",
1881 .chain = cortex_a8_exec_command_handlers,
1882 },
1883 COMMAND_REGISTRATION_DONE
1884 };
1885
1886 struct target_type cortexa8_target = {
1887 .name = "cortex_a8",
1888
1889 .poll = cortex_a8_poll,
1890 .arch_state = armv7a_arch_state,
1891
1892 .target_request_data = NULL,
1893
1894 .halt = cortex_a8_halt,
1895 .resume = cortex_a8_resume,
1896 .step = cortex_a8_step,
1897
1898 .assert_reset = cortex_a8_assert_reset,
1899 .deassert_reset = cortex_a8_deassert_reset,
1900 .soft_reset_halt = NULL,
1901
1902 /* REVISIT allow exporting VFP3 registers ... */
1903 .get_gdb_reg_list = arm_get_gdb_reg_list,
1904
1905 .read_memory = cortex_a8_read_memory,
1906 .write_memory = cortex_a8_write_memory,
1907 .bulk_write_memory = cortex_a8_bulk_write_memory,
1908
1909 .checksum_memory = arm_checksum_memory,
1910 .blank_check_memory = arm_blank_check_memory,
1911
1912 .run_algorithm = armv4_5_run_algorithm,
1913
1914 .add_breakpoint = cortex_a8_add_breakpoint,
1915 .remove_breakpoint = cortex_a8_remove_breakpoint,
1916 .add_watchpoint = NULL,
1917 .remove_watchpoint = NULL,
1918
1919 .commands = cortex_a8_command_handlers,
1920 .target_create = cortex_a8_target_create,
1921 .init_target = cortex_a8_init_target,
1922 .examine = cortex_a8_examine,
1923
1924 .read_phys_memory = cortex_a8_read_phys_memory,
1925 .write_phys_memory = cortex_a8_write_phys_memory,
1926 .mmu = cortex_a8_mmu,
1927 .virt2phys = cortex_a8_virt2phys,
1928
1929 };
