openocd.git: src/target/cortex_a8.c (commit 82ce9a18b46efab3d05998f281463479f258cb63)
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * This program is free software; you can redistribute it and/or modify *
18 * it under the terms of the GNU General Public License as published by *
19 * the Free Software Foundation; either version 2 of the License, or *
20 * (at your option) any later version. *
21 * *
22 * This program is distributed in the hope that it will be useful, *
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
25 * GNU General Public License for more details. *
26 * *
27 * You should have received a copy of the GNU General Public License *
28 * along with this program; if not, write to the *
29 * Free Software Foundation, Inc., *
30 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
31 * *
32 * Cortex-A8(tm) TRM, ARM DDI 0344H *
33 * *
34 ***************************************************************************/
35 #ifdef HAVE_CONFIG_H
36 #include "config.h"
37 #endif
38
39 #include "breakpoints.h"
40 #include "cortex_a8.h"
41 #include "register.h"
42 #include "target_request.h"
43 #include "target_type.h"
44 #include "arm_opcodes.h"
45 #include <helper/time_support.h>
46
47 static int cortex_a8_poll(struct target *target);
48 static int cortex_a8_debug_entry(struct target *target);
49 static int cortex_a8_restore_context(struct target *target, bool bpwp);
50 static int cortex_a8_set_breakpoint(struct target *target,
51 struct breakpoint *breakpoint, uint8_t matchmode);
52 static int cortex_a8_unset_breakpoint(struct target *target,
53 struct breakpoint *breakpoint);
54 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
55 uint32_t *value, int regnum);
56 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
57 uint32_t value, int regnum);
58 static int cortex_a8_mmu(struct target *target, int *enabled);
59 static int cortex_a8_virt2phys(struct target *target,
60 uint32_t virt, uint32_t *phys);
61 static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
62 int d_u_cache, int i_cache);
63 static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
64 int d_u_cache, int i_cache);
65 static uint32_t cortex_a8_get_ttb(struct target *target);
66
67
68 /*
69 * FIXME do topology discovery using the ROM; don't
70 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
71 * cores, with different AP numbering ... don't use a #define
72 * for these numbers, use per-core armv7a state.
73 */
74 #define swjdp_memoryap 0
75 #define swjdp_debugap 1
76 #define OMAP3530_DEBUG_BASE 0x54011000
77
78 /*
79 * Cortex-A8 basic debug access; very low level, assumes state is saved
80 */
81 static int cortex_a8_init_debug_access(struct target *target)
82 {
83 struct armv7a_common *armv7a = target_to_armv7a(target);
84 struct adiv5_dap *swjdp = &armv7a->dap;
85
86 int retval;
87 uint32_t dummy;
88
89 LOG_DEBUG(" ");
90
91 /* Unlocking the debug registers for modification */
92 /* The debugport might be uninitialised so try twice */
93 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
94 if (retval != ERROR_OK)
95 {
96 /* try again */
97 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
98 if (retval == ERROR_OK)
99 {
100 LOG_USER("Unlocking debug access failed on the first try, but succeeded on the second try.");
101 }
102 }
103 if (retval != ERROR_OK)
104 return retval;
105 /* Clear Sticky Power Down status Bit in PRSR to enable access to
106 the registers in the Core Power Domain */
107 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_PRSR, &dummy);
108 if (retval != ERROR_OK)
109 return retval;
110
111 /* Enabling of instruction execution in debug mode is done in debug_entry code */
112
113 /* Resync breakpoint registers */
114
115 /* Since this is likely called from init or reset, update target state information */
116 retval = cortex_a8_poll(target);
117
118 return retval;
119 }
120
121 /* To reduce needless round-trips, pass in a pointer to the current
122 * DSCR value. Initialize it to zero if you just need to know the
123 * value on return from this function; or DSCR_INSTR_COMP if you
124 * happen to know that no instruction is pending.
125 */
126 static int cortex_a8_exec_opcode(struct target *target,
127 uint32_t opcode, uint32_t *dscr_p)
128 {
129 uint32_t dscr;
130 int retval;
131 struct armv7a_common *armv7a = target_to_armv7a(target);
132 struct adiv5_dap *swjdp = &armv7a->dap;
133
134 dscr = dscr_p ? *dscr_p : 0;
135
136 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
137
138 /* Wait for InstrCompl bit to be set */
139 while ((dscr & DSCR_INSTR_COMP) == 0)
140 {
141 retval = mem_ap_read_atomic_u32(swjdp,
142 armv7a->debug_base + CPUDBG_DSCR, &dscr);
143 if (retval != ERROR_OK)
144 {
145 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
146 return retval;
147 }
148 }
149
150 mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);
151
152 do
153 {
154 retval = mem_ap_read_atomic_u32(swjdp,
155 armv7a->debug_base + CPUDBG_DSCR, &dscr);
156 if (retval != ERROR_OK)
157 {
158 LOG_ERROR("Could not read DSCR register");
159 return retval;
160 }
161 }
162 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
163
164 if (dscr_p)
165 *dscr_p = dscr;
166
167 return retval;
168 }
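/* Usage sketch (illustrative only, not called anywhere): the dscr_p contract
 * described above lets a caller chain several opcodes while reading DSCR only
 * when needed, e.g. to move the PC into the DCC the way the register read
 * path below does:
 *
 *	uint32_t dscr = DSCR_INSTR_COMP;	// known: nothing is pending
 *	cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);	// MOV r0, r15
 *	cortex_a8_exec_opcode(target,
 *			ARMV4_5_MCR(14, 0, 0, 0, 5, 0), &dscr);	// r0 -> DTRTX
 */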
169
170 /**************************************************************************
171 Read core registers with very few exec_opcode calls; fast, but needs a work area.
172 This can cause problems when the MMU is active.
173 **************************************************************************/
174 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
175 uint32_t * regfile)
176 {
177 int retval = ERROR_OK;
178 struct armv7a_common *armv7a = target_to_armv7a(target);
179 struct adiv5_dap *swjdp = &armv7a->dap;
180
181 cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
182 cortex_a8_dap_write_coreregister_u32(target, address, 0);
183 cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
184 dap_ap_select(swjdp, swjdp_memoryap);
185 mem_ap_read_buf_u32(swjdp, (uint8_t *)(&regfile[1]), 4*15, address);
186 dap_ap_select(swjdp, swjdp_debugap);
187
188 return retval;
189 }
190
191 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
192 uint32_t *value, int regnum)
193 {
194 int retval = ERROR_OK;
195 uint8_t reg = regnum&0xFF;
196 uint32_t dscr = 0;
197 struct armv7a_common *armv7a = target_to_armv7a(target);
198 struct adiv5_dap *swjdp = &armv7a->dap;
199
200 if (reg > 17)
201 return retval;
202
203 if (reg < 15)
204 {
205 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
206 cortex_a8_exec_opcode(target,
207 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
208 &dscr);
209 }
210 else if (reg == 15)
211 {
212 /* "MOV r0, r15"; then move r0 to DCCTX */
213 cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
214 cortex_a8_exec_opcode(target,
215 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
216 &dscr);
217 }
218 else
219 {
220 /* "MRS r0, CPSR" or "MRS r0, SPSR"
221 * then move r0 to DCCTX
222 */
223 cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
224 cortex_a8_exec_opcode(target,
225 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
226 &dscr);
227 }
228
229 /* Wait for DTRTXfull, then read DTRTX */
230 while ((dscr & DSCR_DTR_TX_FULL) == 0)
231 {
232 retval = mem_ap_read_atomic_u32(swjdp,
233 armv7a->debug_base + CPUDBG_DSCR, &dscr);
234 if (retval != ERROR_OK)
235 return retval;
236 }
237
238 retval = mem_ap_read_atomic_u32(swjdp,
239 armv7a->debug_base + CPUDBG_DTRTX, value);
240 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
241
242 return retval;
243 }
244
245 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
246 uint32_t value, int regnum)
247 {
248 int retval = ERROR_OK;
249 uint8_t Rd = regnum&0xFF;
250 uint32_t dscr;
251 struct armv7a_common *armv7a = target_to_armv7a(target);
252 struct adiv5_dap *swjdp = &armv7a->dap;
253
254 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
255
256 /* Check that DCCRX is not full */
257 retval = mem_ap_read_atomic_u32(swjdp,
258 armv7a->debug_base + CPUDBG_DSCR, &dscr);
259 if (retval != ERROR_OK)
260 return retval;
261 if (dscr & DSCR_DTR_RX_FULL)
262 {
263 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
264 /* Clear DCCRX by reading it, "MRC p14, 0, R0, c0, c5, 0", opcode 0xEE100E15 */
265 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
266 &dscr);
267 }
268
269 if (Rd > 17)
270 return retval;
271
272 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
273 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
274 retval = mem_ap_write_u32(swjdp,
275 armv7a->debug_base + CPUDBG_DTRRX, value);
276
277 if (Rd < 15)
278 {
279 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
280 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
281 &dscr);
282 }
283 else if (Rd == 15)
284 {
285 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
286 * then "mov r15, r0"
287 */
288 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
289 &dscr);
290 cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
291 }
292 else
293 {
294 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
295 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
296 */
297 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
298 &dscr);
299 cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
300 &dscr);
301
302 /* "Prefetch flush" after modifying execution status in CPSR */
303 if (Rd == 16)
304 cortex_a8_exec_opcode(target,
305 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
306 &dscr);
307 }
308
309 return retval;
310 }
311
312 /* Write to memory mapped registers directly with no cache or mmu handling */
313 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
314 {
315 int retval;
316 struct armv7a_common *armv7a = target_to_armv7a(target);
317 struct adiv5_dap *swjdp = &armv7a->dap;
318
319 retval = mem_ap_write_atomic_u32(swjdp, address, value);
320
321 return retval;
322 }
323
324 /*
325 * Cortex-A8 implementation of Debug Programmer's Model
326 *
327 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
328 * so there's no need to poll for it before executing an instruction.
329 *
330 * NOTE that in several of these cases the "stall" mode might be useful.
331 * It'd let us queue a few operations together... prepare/finish might
332 * be the places to enable/disable that mode.
333 */
334
335 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
336 {
337 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
338 }
339
340 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
341 {
342 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
343 return mem_ap_write_u32(&a8->armv7a_common.dap,
344 a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
345 }
346
347 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
348 uint32_t *dscr_p)
349 {
350 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
351 uint32_t dscr = DSCR_INSTR_COMP;
352 int retval;
353
354 if (dscr_p)
355 dscr = *dscr_p;
356
357 /* Wait for DTRRXfull */
358 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
359 retval = mem_ap_read_atomic_u32(swjdp,
360 a8->armv7a_common.debug_base + CPUDBG_DSCR,
361 &dscr);
362 if (retval != ERROR_OK)
363 return retval;
364 }
365
366 retval = mem_ap_read_atomic_u32(swjdp,
367 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
368 if (retval != ERROR_OK)
369 return retval;
370 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
371
372 if (dscr_p)
373 *dscr_p = dscr;
374
375 return retval;
376 }
377
378 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
379 {
380 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
381 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
382 uint32_t dscr;
383 int retval;
384
385 /* set up invariant: INSTR_COMP is set after every DPM operation */
386 long long then = timeval_ms();
387 for (;;)
388 {
389 retval = mem_ap_read_atomic_u32(swjdp,
390 a8->armv7a_common.debug_base + CPUDBG_DSCR,
391 &dscr);
392 if (retval != ERROR_OK)
393 return retval;
394 if ((dscr & DSCR_INSTR_COMP) != 0)
395 break;
396 if (timeval_ms() > then + 1000)
397 {
398 LOG_ERROR("Timeout waiting for dpm prepare");
399 return ERROR_FAIL;
400 }
401 }
402
403 /* this "should never happen" ... */
404 if (dscr & DSCR_DTR_RX_FULL) {
405 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
406 /* Clear DCCRX */
407 retval = cortex_a8_exec_opcode(
408 a8->armv7a_common.armv4_5_common.target,
409 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
410 &dscr);
411 }
412
413 return retval;
414 }
415
416 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
417 {
418 /* REVISIT what could be done here? */
419 return ERROR_OK;
420 }
421
422 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
423 uint32_t opcode, uint32_t data)
424 {
425 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
426 int retval;
427 uint32_t dscr = DSCR_INSTR_COMP;
428
429 retval = cortex_a8_write_dcc(a8, data);
430
431 return cortex_a8_exec_opcode(
432 a8->armv7a_common.armv4_5_common.target,
433 opcode,
434 &dscr);
435 }
436
437 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
438 uint32_t opcode, uint32_t data)
439 {
440 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
441 uint32_t dscr = DSCR_INSTR_COMP;
442 int retval;
443
444 retval = cortex_a8_write_dcc(a8, data);
445
446 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
447 retval = cortex_a8_exec_opcode(
448 a8->armv7a_common.armv4_5_common.target,
449 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
450 &dscr);
451
452 /* then the opcode, taking data from R0 */
453 retval = cortex_a8_exec_opcode(
454 a8->armv7a_common.armv4_5_common.target,
455 opcode,
456 &dscr);
457
458 return retval;
459 }
460
461 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
462 {
463 struct target *target = dpm->arm->target;
464 uint32_t dscr = DSCR_INSTR_COMP;
465
466 /* "Prefetch flush" after modifying execution status in CPSR */
467 return cortex_a8_exec_opcode(target,
468 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
469 &dscr);
470 }
471
472 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
473 uint32_t opcode, uint32_t *data)
474 {
475 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
476 int retval;
477 uint32_t dscr = DSCR_INSTR_COMP;
478
479 /* the opcode, writing data to DCC */
480 retval = cortex_a8_exec_opcode(
481 a8->armv7a_common.armv4_5_common.target,
482 opcode,
483 &dscr);
484
485 return cortex_a8_read_dcc(a8, data, &dscr);
486 }
487
488
489 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
490 uint32_t opcode, uint32_t *data)
491 {
492 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
493 uint32_t dscr = DSCR_INSTR_COMP;
494 int retval;
495
496 /* the opcode, writing data to R0 */
497 retval = cortex_a8_exec_opcode(
498 a8->armv7a_common.armv4_5_common.target,
499 opcode,
500 &dscr);
501
502 /* write R0 to DCC */
503 retval = cortex_a8_exec_opcode(
504 a8->armv7a_common.armv4_5_common.target,
505 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
506 &dscr);
507
508 return cortex_a8_read_dcc(a8, data, &dscr);
509 }
510
511 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
512 uint32_t addr, uint32_t control)
513 {
514 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
515 uint32_t vr = a8->armv7a_common.debug_base;
516 uint32_t cr = a8->armv7a_common.debug_base;
517 int retval;
518
519 switch (index_t) {
520 case 0 ... 15: /* breakpoints */
521 vr += CPUDBG_BVR_BASE;
522 cr += CPUDBG_BCR_BASE;
523 break;
524 case 16 ... 31: /* watchpoints */
525 vr += CPUDBG_WVR_BASE;
526 cr += CPUDBG_WCR_BASE;
527 index_t -= 16;
528 break;
529 default:
530 return ERROR_FAIL;
531 }
532 vr += 4 * index_t;
533 cr += 4 * index_t;
534
535 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
536 (unsigned) vr, (unsigned) cr);
537
538 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
539 vr, addr);
540 if (retval != ERROR_OK)
541 return retval;
542 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
543 cr, control);
544 return retval;
545 }
546
547 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
548 {
549 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
550 uint32_t cr;
551
552 switch (index_t) {
553 case 0 ... 15:
554 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
555 break;
556 case 16 ... 31:
557 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
558 index_t -= 16;
559 break;
560 default:
561 return ERROR_FAIL;
562 }
563 cr += 4 * index_t;
564
565 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
566
567 /* clear control register */
568 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
569 }
570
571 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
572 {
573 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
574 int retval;
575
576 dpm->arm = &a8->armv7a_common.armv4_5_common;
577 dpm->didr = didr;
578
579 dpm->prepare = cortex_a8_dpm_prepare;
580 dpm->finish = cortex_a8_dpm_finish;
581
582 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
583 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
584 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
585
586 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
587 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
588
589 dpm->bpwp_enable = cortex_a8_bpwp_enable;
590 dpm->bpwp_disable = cortex_a8_bpwp_disable;
591
592 retval = arm_dpm_setup(dpm);
593 if (retval == ERROR_OK)
594 retval = arm_dpm_initialize(dpm);
595
596 return retval;
597 }
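/* Illustrative sketch (assumption, not part of this driver): once
 * cortex_a8_dpm_setup() has run, generic ARM code can use these hooks
 * without knowing anything about the A8 DCC transport, e.g. to read
 * the MIDR through R0:
 *
 *	struct arm_dpm *dpm = &a8->armv7a_common.dpm;
 *	uint32_t midr;
 *	if (dpm->prepare(dpm) == ERROR_OK)
 *		dpm->instr_read_data_r0(dpm,
 *				ARMV4_5_MRC(15, 0, 0, 0, 0, 0),	// MRC p15,0,r0,c0,c0,0
 *				&midr);
 *	dpm->finish(dpm);
 */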
598
599
600 /*
601 * Cortex-A8 Run control
602 */
603
604 static int cortex_a8_poll(struct target *target)
605 {
606 int retval = ERROR_OK;
607 uint32_t dscr;
608 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
609 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
610 struct adiv5_dap *swjdp = &armv7a->dap;
611 enum target_state prev_target_state = target->state;
612 uint8_t saved_apsel = dap_ap_get_select(swjdp);
613
614 dap_ap_select(swjdp, swjdp_debugap);
615 retval = mem_ap_read_atomic_u32(swjdp,
616 armv7a->debug_base + CPUDBG_DSCR, &dscr);
617 if (retval != ERROR_OK)
618 {
619 dap_ap_select(swjdp, saved_apsel);
620 return retval;
621 }
622 cortex_a8->cpudbg_dscr = dscr;
623
624 if ((dscr & 0x3) == 0x3)
625 {
626 if (prev_target_state != TARGET_HALTED)
627 {
628 /* We have a halting debug event */
629 LOG_DEBUG("Target halted");
630 target->state = TARGET_HALTED;
631 if ((prev_target_state == TARGET_RUNNING)
632 || (prev_target_state == TARGET_RESET))
633 {
634 retval = cortex_a8_debug_entry(target);
635 if (retval != ERROR_OK)
636 return retval;
637
638 target_call_event_callbacks(target,
639 TARGET_EVENT_HALTED);
640 }
641 if (prev_target_state == TARGET_DEBUG_RUNNING)
642 {
643 LOG_DEBUG(" ");
644
645 retval = cortex_a8_debug_entry(target);
646 if (retval != ERROR_OK)
647 return retval;
648
649 target_call_event_callbacks(target,
650 TARGET_EVENT_DEBUG_HALTED);
651 }
652 }
653 }
654 else if ((dscr & 0x3) == 0x2)
655 {
656 target->state = TARGET_RUNNING;
657 }
658 else
659 {
660 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
661 target->state = TARGET_UNKNOWN;
662 }
663
664 dap_ap_select(swjdp, saved_apsel);
665
666 return retval;
667 }
668
669 static int cortex_a8_halt(struct target *target)
670 {
671 int retval = ERROR_OK;
672 uint32_t dscr;
673 struct armv7a_common *armv7a = target_to_armv7a(target);
674 struct adiv5_dap *swjdp = &armv7a->dap;
675 uint8_t saved_apsel = dap_ap_get_select(swjdp);
676 dap_ap_select(swjdp, swjdp_debugap);
677
678 /*
679 * Tell the core to be halted by writing DRCR with 0x1
680 * and then wait for the core to be halted.
681 */
682 retval = mem_ap_write_atomic_u32(swjdp,
683 armv7a->debug_base + CPUDBG_DRCR, 0x1);
684 if (retval != ERROR_OK)
685 goto out;
686
687 /*
688 * enter halting debug mode
689 */
690 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
691 if (retval != ERROR_OK)
692 goto out;
693
694 retval = mem_ap_write_atomic_u32(swjdp,
695 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
696 if (retval != ERROR_OK)
697 goto out;
698
699 long long then = timeval_ms();
700 for (;;)
701 {
702 retval = mem_ap_read_atomic_u32(swjdp,
703 armv7a->debug_base + CPUDBG_DSCR, &dscr);
704 if (retval != ERROR_OK)
705 goto out;
706 if ((dscr & DSCR_CORE_HALTED) != 0)
707 {
708 break;
709 }
710 if (timeval_ms() > then + 1000)
711 {
712 LOG_ERROR("Timeout waiting for halt");
713 return ERROR_FAIL;
714 }
715 }
716
717 target->debug_reason = DBG_REASON_DBGRQ;
718
719 out:
720 dap_ap_select(swjdp, saved_apsel);
721 return retval;
722 }
723
724 static int cortex_a8_resume(struct target *target, int current,
725 uint32_t address, int handle_breakpoints, int debug_execution)
726 {
727 struct armv7a_common *armv7a = target_to_armv7a(target);
728 struct arm *armv4_5 = &armv7a->armv4_5_common;
729 struct adiv5_dap *swjdp = &armv7a->dap;
730 int retval;
731
732 // struct breakpoint *breakpoint = NULL;
733 uint32_t resume_pc, dscr;
734
735 uint8_t saved_apsel = dap_ap_get_select(swjdp);
736 dap_ap_select(swjdp, swjdp_debugap);
737
738 if (!debug_execution)
739 target_free_all_working_areas(target);
740
741 #if 0
742 if (debug_execution)
743 {
744 /* Disable interrupts */
745 /* We disable interrupts in the PRIMASK register instead of
746 * masking with C_MASKINTS,
747 * This is probably the same issue as Cortex-M3 Errata 377493:
748 * C_MASKINTS in parallel with disabled interrupts can cause
749 * local faults to not be taken. */
750 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
751 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
752 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
753
754 /* Make sure we are in Thumb mode */
755 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
756 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
757 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
758 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
759 }
760 #endif
761
762 /* current = 1: continue on current pc, otherwise continue at <address> */
763 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
764 if (!current)
765 resume_pc = address;
766
767 /* Make sure that the ARMv7 gdb Thumb fixup does not
768 * kill the return address
769 */
770 switch (armv4_5->core_state)
771 {
772 case ARM_STATE_ARM:
773 resume_pc &= 0xFFFFFFFC;
774 break;
775 case ARM_STATE_THUMB:
776 case ARM_STATE_THUMB_EE:
777 /* When the return address is loaded into PC
778 * bit 0 must be 1 to stay in Thumb state
779 */
780 resume_pc |= 0x1;
781 break;
782 case ARM_STATE_JAZELLE:
783 LOG_ERROR("How do I resume into Jazelle state??");
784 return ERROR_FAIL;
785 }
786 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
787 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
788 armv4_5->pc->dirty = 1;
789 armv4_5->pc->valid = 1;
790
791 cortex_a8_restore_context(target, handle_breakpoints);
792
793 #if 0
794 /* the front-end may request us not to handle breakpoints */
795 if (handle_breakpoints)
796 {
797 /* Single step past breakpoint at current address */
798 if ((breakpoint = breakpoint_find(target, resume_pc)))
799 {
800 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
801 cortex_m3_unset_breakpoint(target, breakpoint);
802 cortex_m3_single_step_core(target);
803 cortex_m3_set_breakpoint(target, breakpoint);
804 }
805 }
806
807 #endif
808 /* Restart core and wait for it to be started
809 * NOTE: this clears DSCR_ITR_EN and other bits.
810 *
811 * REVISIT: for single stepping, we probably want to
812 * disable IRQs by default, with optional override...
813 */
814 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR, 0x2);
815 if (retval != ERROR_OK)
816 return retval;
817
818 long long then = timeval_ms();
819 for (;;)
820 {
821 retval = mem_ap_read_atomic_u32(swjdp,
822 armv7a->debug_base + CPUDBG_DSCR, &dscr);
823 if (retval != ERROR_OK)
824 return retval;
825 if ((dscr & DSCR_CORE_RESTARTED) != 0)
826 break;
827 if (timeval_ms() > then + 1000)
828 {
829 LOG_ERROR("Timeout waiting for resume");
830 return ERROR_FAIL;
831 }
832 }
833
834 target->debug_reason = DBG_REASON_NOTHALTED;
835 target->state = TARGET_RUNNING;
836
837 /* registers are now invalid */
838 register_cache_invalidate(armv4_5->core_cache);
839
840 if (!debug_execution)
841 {
842 target->state = TARGET_RUNNING;
843 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
844 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
845 }
846 else
847 {
848 target->state = TARGET_DEBUG_RUNNING;
849 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
850 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
851 }
852
853 dap_ap_select(swjdp, saved_apsel);
854
855 return ERROR_OK;
856 }
857
858 static int cortex_a8_debug_entry(struct target *target)
859 {
860 int i;
861 uint32_t regfile[16], cpsr, dscr;
862 int retval = ERROR_OK;
863 struct working_area *regfile_working_area = NULL;
864 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
865 struct armv7a_common *armv7a = target_to_armv7a(target);
866 struct arm *armv4_5 = &armv7a->armv4_5_common;
867 struct adiv5_dap *swjdp = &armv7a->dap;
868 struct reg *reg;
869
870 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
871
872 /* REVISIT surely we should not re-read DSCR !! */
873 retval = mem_ap_read_atomic_u32(swjdp,
874 armv7a->debug_base + CPUDBG_DSCR, &dscr);
875 if (retval != ERROR_OK)
876 return retval;
877
878 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
879 * imprecise data aborts get discarded by issuing a Data
880 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
881 */
882
883 /* Enable the ITR execution once we are in debug mode */
884 dscr |= DSCR_ITR_EN;
885 retval = mem_ap_write_atomic_u32(swjdp,
886 armv7a->debug_base + CPUDBG_DSCR, dscr);
887 if (retval != ERROR_OK)
888 return retval;
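#if 0
	/* Sketch of the Data Synchronization Barrier suggested by the REVISIT
	 * note above; with the ITR path enabled it could be issued here.
	 * Untested, so it stays disabled.
	 */
	retval = cortex_a8_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 10, 4), &dscr);
	if (retval != ERROR_OK)
		return retval;
#endif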
889
890 /* Examine debug reason */
891 arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);
892
893 /* save address of instruction that triggered the watchpoint? */
894 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
895 uint32_t wfar;
896
897 retval = mem_ap_read_atomic_u32(swjdp,
898 armv7a->debug_base + CPUDBG_WFAR,
899 &wfar);
900 if (retval != ERROR_OK)
901 return retval;
902 arm_dpm_report_wfar(&armv7a->dpm, wfar);
903 }
904
905 /* REVISIT fast_reg_read is never set ... */
906
907 /* Examine target state and mode */
908 if (cortex_a8->fast_reg_read)
909 target_alloc_working_area(target, 64, &regfile_working_area);
910
911 /* First load registers accessible through the core debug port */
912 if (!regfile_working_area)
913 {
914 retval = arm_dpm_read_current_registers(&armv7a->dpm);
915 }
916 else
917 {
918 dap_ap_select(swjdp, swjdp_memoryap);
919 cortex_a8_read_regs_through_mem(target,
920 regfile_working_area->address, regfile);
921 dap_ap_select(swjdp, swjdp_memoryap);
922 target_free_working_area(target, regfile_working_area);
923
924 /* read Current PSR */
925 cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
926 dap_ap_select(swjdp, swjdp_debugap);
927 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
928
929 arm_set_cpsr(armv4_5, cpsr);
930
931 /* update cache */
932 for (i = 0; i <= ARM_PC; i++)
933 {
934 reg = arm_reg_current(armv4_5, i);
935
936 buf_set_u32(reg->value, 0, 32, regfile[i]);
937 reg->valid = 1;
938 reg->dirty = 0;
939 }
940
941 /* Fixup PC Resume Address */
942 if (cpsr & (1 << 5))
943 {
944 // T bit set for Thumb or ThumbEE state
945 regfile[ARM_PC] -= 4;
946 }
947 else
948 {
949 // ARM state
950 regfile[ARM_PC] -= 8;
951 }
952
953 reg = armv4_5->pc;
954 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
955 reg->dirty = reg->valid;
956 }
957
958 #if 0
959 /* TODO, Move this */
960 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
961 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
962 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
963
964 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
965 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
966
967 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
968 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
969 #endif
970
971 /* Are we in an exception handler */
972 // armv4_5->exception_number = 0;
973 if (armv7a->post_debug_entry)
974 armv7a->post_debug_entry(target);
975
976 return retval;
977 }
978
979 static void cortex_a8_post_debug_entry(struct target *target)
980 {
981 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
982 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
983 int retval;
984
985 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
986 retval = armv7a->armv4_5_common.mrc(target, 15,
987 0, 0, /* op1, op2 */
988 1, 0, /* CRn, CRm */
989 &cortex_a8->cp15_control_reg);
990 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
991
992 if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
993 {
994 uint32_t cache_type_reg;
995
996 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
997 retval = armv7a->armv4_5_common.mrc(target, 15,
998 0, 1, /* op1, op2 */
999 0, 0, /* CRn, CRm */
1000 &cache_type_reg);
1001 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);
1002
1003 /* FIXME the armv4_5 cache info DOES NOT APPLY to Cortex-A8 */
1004 armv4_5_identify_cache(cache_type_reg,
1005 &armv7a->armv4_5_mmu.armv4_5_cache);
1006 }
1007
1008 armv7a->armv4_5_mmu.mmu_enabled =
1009 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
1010 armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
1011 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
1012 armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
1013 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
1014
1015
1016 }
1017
1018 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1019 int handle_breakpoints)
1020 {
1021 struct armv7a_common *armv7a = target_to_armv7a(target);
1022 struct arm *armv4_5 = &armv7a->armv4_5_common;
1023 struct breakpoint *breakpoint = NULL;
1024 struct breakpoint stepbreakpoint;
1025 struct reg *r;
1026 int retval;
1027
1028 int timeout = 100;
1029
1030 if (target->state != TARGET_HALTED)
1031 {
1032 LOG_WARNING("target not halted");
1033 return ERROR_TARGET_NOT_HALTED;
1034 }
1035
1036 /* current = 1: continue on current pc, otherwise continue at <address> */
1037 r = armv4_5->pc;
1038 if (!current)
1039 {
1040 buf_set_u32(r->value, 0, 32, address);
1041 }
1042 else
1043 {
1044 address = buf_get_u32(r->value, 0, 32);
1045 }
1046
1047 /* The front-end may request us not to handle breakpoints.
1048 * But since Cortex-A8 uses breakpoint for single step,
1049 * we MUST handle breakpoints.
1050 */
1051 handle_breakpoints = 1;
1052 if (handle_breakpoints) {
1053 breakpoint = breakpoint_find(target, address);
1054 if (breakpoint)
1055 cortex_a8_unset_breakpoint(target, breakpoint);
1056 }
1057
1058 /* Setup single step breakpoint */
1059 stepbreakpoint.address = address;
1060 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1061 ? 2 : 4;
1062 stepbreakpoint.type = BKPT_HARD;
1063 stepbreakpoint.set = 0;
1064
1065 /* Break on IVA mismatch: matchmode 0x04 halts the core at the first instruction fetched from any address other than the one programmed above, i.e. after a single step */
1066 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1067
1068 target->debug_reason = DBG_REASON_SINGLESTEP;
1069
1070 retval = cortex_a8_resume(target, 1, address, 0, 0);
1071 if (retval != ERROR_OK)
1072 return retval;
1073
1074 while (target->state != TARGET_HALTED)
1075 {
1076 retval = cortex_a8_poll(target);
1077 if (retval != ERROR_OK)
1078 return retval;
1079 if (--timeout == 0)
1080 {
1081 LOG_ERROR("timeout waiting for target halt");
1082 return ERROR_FAIL;
1083 }
1084 }
1085
1086 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1087 if (timeout > 0)
1088 target->debug_reason = DBG_REASON_BREAKPOINT;
1089
1090 if (breakpoint)
1091 cortex_a8_set_breakpoint(target, breakpoint, 0);
1092
1093 if (target->state == TARGET_HALTED)
1094 LOG_DEBUG("target stepped");
1095
1096 return ERROR_OK;
1097 }
1098
1099 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1100 {
1101 struct armv7a_common *armv7a = target_to_armv7a(target);
1102
1103 LOG_DEBUG(" ");
1104
1105 if (armv7a->pre_restore_context)
1106 armv7a->pre_restore_context(target);
1107
1108 arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1109
1110 return ERROR_OK;
1111 }
1112
1113
1114 /*
1115 * Cortex-A8 Breakpoint and watchpoint functions
1116 */
1117
1118 /* Setup hardware Breakpoint Register Pair */
1119 static int cortex_a8_set_breakpoint(struct target *target,
1120 struct breakpoint *breakpoint, uint8_t matchmode)
1121 {
1122 int retval;
1123 int brp_i=0;
1124 uint32_t control;
1125 uint8_t byte_addr_select = 0x0F;
1126 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1127 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1128 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1129
1130 if (breakpoint->set)
1131 {
1132 LOG_WARNING("breakpoint already set");
1133 return ERROR_OK;
1134 }
1135
1136 if (breakpoint->type == BKPT_HARD)
1137 {
1138 while ((brp_i < cortex_a8->brp_num) && brp_list[brp_i].used)
1139 brp_i++;
1140 if (brp_i >= cortex_a8->brp_num)
1141 {
1142 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1143 return ERROR_FAIL;
1144 }
1145 breakpoint->set = brp_i + 1;
1146 if (breakpoint->length == 2)
1147 {
1148 byte_addr_select = (3 << (breakpoint->address & 0x02));
1149 }
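/* DBGBCR layout (per the v7 Debug architecture / A8 TRM): bits [23:20]
 * select the breakpoint type (0x0 = unlinked address match, 0x4 = address
 * mismatch, i.e. the matchmode passed in), [8:5] byte address select,
 * [2:1] = 0b11 match in both privileged and user modes, [0] enable.
 */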
1150 control = ((matchmode & 0x7) << 20)
1151 | (byte_addr_select << 5)
1152 | (3 << 1) | 1;
1153 brp_list[brp_i].used = 1;
1154 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1155 brp_list[brp_i].control = control;
1156 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1157 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1158 brp_list[brp_i].value);
1159 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1160 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1161 brp_list[brp_i].control);
1162 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1163 brp_list[brp_i].control,
1164 brp_list[brp_i].value);
1165 }
1166 else if (breakpoint->type == BKPT_SOFT)
1167 {
1168 uint8_t code[4];
1169 if (breakpoint->length == 2)
1170 {
1171 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1172 }
1173 else
1174 {
1175 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1176 }
1177 retval = target->type->read_memory(target,
1178 breakpoint->address & 0xFFFFFFFE,
1179 breakpoint->length, 1,
1180 breakpoint->orig_instr);
1181 if (retval != ERROR_OK)
1182 return retval;
1183 retval = target->type->write_memory(target,
1184 breakpoint->address & 0xFFFFFFFE,
1185 breakpoint->length, 1, code);
1186 if (retval != ERROR_OK)
1187 return retval;
1188 breakpoint->set = 0x11; /* Any nice value but 0 */
1189 }
1190
1191 return ERROR_OK;
1192 }
1193
1194 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1195 {
1196 int retval;
1197 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1198 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1199 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1200
1201 if (!breakpoint->set)
1202 {
1203 LOG_WARNING("breakpoint not set");
1204 return ERROR_OK;
1205 }
1206
1207 if (breakpoint->type == BKPT_HARD)
1208 {
1209 int brp_i = breakpoint->set - 1;
1210 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1211 {
1212 LOG_DEBUG("Invalid BRP number in breakpoint");
1213 return ERROR_OK;
1214 }
1215 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1216 brp_list[brp_i].control, brp_list[brp_i].value);
1217 brp_list[brp_i].used = 0;
1218 brp_list[brp_i].value = 0;
1219 brp_list[brp_i].control = 0;
1220 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1221 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1222 brp_list[brp_i].control);
1223 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1224 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1225 brp_list[brp_i].value);
1226 }
1227 else
1228 {
1229 /* restore original instruction (kept in target endianness) */
1230 if (breakpoint->length == 4)
1231 {
1232 retval = target->type->write_memory(target,
1233 breakpoint->address & 0xFFFFFFFE,
1234 4, 1, breakpoint->orig_instr);
1235 if (retval != ERROR_OK)
1236 return retval;
1237 }
1238 else
1239 {
1240 retval = target->type->write_memory(target,
1241 breakpoint->address & 0xFFFFFFFE,
1242 2, 1, breakpoint->orig_instr);
1243 if (retval != ERROR_OK)
1244 return retval;
1245 }
1246 }
1247 breakpoint->set = 0;
1248
1249 return ERROR_OK;
1250 }
1251
1252 static int cortex_a8_add_breakpoint(struct target *target,
1253 struct breakpoint *breakpoint)
1254 {
1255 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1256
1257 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1258 {
1259 LOG_INFO("no hardware breakpoint available");
1260 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1261 }
1262
1263 if (breakpoint->type == BKPT_HARD)
1264 cortex_a8->brp_num_available--;
1265 cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1266
1267 return ERROR_OK;
1268 }
1269
1270 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1271 {
1272 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1273
1274 #if 0
1275 /* It is perfectly possible to remove breakpoints while the target is running */
1276 if (target->state != TARGET_HALTED)
1277 {
1278 LOG_WARNING("target not halted");
1279 return ERROR_TARGET_NOT_HALTED;
1280 }
1281 #endif
1282
1283 if (breakpoint->set)
1284 {
1285 cortex_a8_unset_breakpoint(target, breakpoint);
1286 if (breakpoint->type == BKPT_HARD)
1287 cortex_a8->brp_num_available++ ;
1288 }
1289
1290
1291 return ERROR_OK;
1292 }
1293
1294
1295
1296 /*
1297 * Cortex-A8 Reset functions
1298 */
1299
1300 static int cortex_a8_assert_reset(struct target *target)
1301 {
1302 struct armv7a_common *armv7a = target_to_armv7a(target);
1303
1304 LOG_DEBUG(" ");
1305
1306 /* FIXME when halt is requested, make it work somehow... */
1307
1308 /* Issue some kind of warm reset. */
1309 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1310 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1311 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1312 /* REVISIT handle "pulls" cases, if there's
1313 * hardware that needs them to work.
1314 */
1315 jtag_add_reset(0, 1);
1316 } else {
1317 LOG_ERROR("%s: how to reset?", target_name(target));
1318 return ERROR_FAIL;
1319 }
1320
1321 /* registers are now invalid */
1322 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1323
1324 target->state = TARGET_RESET;
1325
1326 return ERROR_OK;
1327 }
1328
1329 static int cortex_a8_deassert_reset(struct target *target)
1330 {
1331 int retval;
1332
1333 LOG_DEBUG(" ");
1334
1335 /* be certain SRST is off */
1336 jtag_add_reset(0, 0);
1337
1338 retval = cortex_a8_poll(target);
1339 if (retval != ERROR_OK)
1340 return retval;
1341
1342 if (target->reset_halt) {
1343 if (target->state != TARGET_HALTED) {
1344 LOG_WARNING("%s: ran after reset and before halt ...",
1345 target_name(target));
1346 if ((retval = target_halt(target)) != ERROR_OK)
1347 return retval;
1348 }
1349 }
1350
1351 return ERROR_OK;
1352 }
1353
1354 /*
1355 * Cortex-A8 Memory access
1356 *
1357 * This is the same as for the Cortex-M3, but we must also use the
1358 * correct AP number for every access.
1359 */
1360
1361 static int cortex_a8_read_phys_memory(struct target *target,
1362 uint32_t address, uint32_t size,
1363 uint32_t count, uint8_t *buffer)
1364 {
1365 struct armv7a_common *armv7a = target_to_armv7a(target);
1366 struct adiv5_dap *swjdp = &armv7a->dap;
1367 int retval = ERROR_INVALID_ARGUMENTS;
1368
1369 /* cortex_a8 handles unaligned memory access */
1370
1371 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1372 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);
1373 if (count && buffer) {
1374 switch (size) {
1375 case 4:
1376 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1377 break;
1378 case 2:
1379 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1380 break;
1381 case 1:
1382 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1383 break;
1384 }
1385 }
1386
1387 return retval;
1388 }
1389
1390 static int cortex_a8_read_memory(struct target *target, uint32_t address,
1391 uint32_t size, uint32_t count, uint8_t *buffer)
1392 {
1393 int enabled = 0;
1394 uint32_t virt, phys;
1395 int retval;
1396
1397 /* cortex_a8 handles unaligned memory access */
1398
1399 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1400 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
1401 retval = cortex_a8_mmu(target, &enabled);
1402 if (retval != ERROR_OK)
1403 return retval;
1404
1405 if(enabled)
1406 {
1407 virt = address;
1408 cortex_a8_virt2phys(target, virt, &phys);
1409 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1410 address = phys;
1411 }
1412
1413 return cortex_a8_read_phys_memory(target, address, size, count, buffer);
1414 }
1415
1416 static int cortex_a8_write_phys_memory(struct target *target,
1417 uint32_t address, uint32_t size,
1418 uint32_t count, uint8_t *buffer)
1419 {
1420 struct armv7a_common *armv7a = target_to_armv7a(target);
1421 struct adiv5_dap *swjdp = &armv7a->dap;
1422 int retval = ERROR_INVALID_ARGUMENTS;
1423
1424 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1425
1426 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);
1427 if (count && buffer) {
1428 switch (size) {
1429 case 4:
1430 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
1431 break;
1432 case 2:
1433 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1434 break;
1435 case 1:
1436 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1437 break;
1438 }
1439 }
1440
1441 /* REVISIT this op is generic ARMv7-A/R stuff */
1442 if (retval == ERROR_OK && target->state == TARGET_HALTED)
1443 {
1444 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
1445
1446 retval = dpm->prepare(dpm);
1447 if (retval != ERROR_OK)
1448 return retval;
1449
1450 /* The cache handling will NOT work with the MMU active; the
1451 * wrong addresses will be invalidated!
1452 *
1453 * For both ICache and DCache, walk all cache lines in the
1454 * address range. Cortex-A8 has fixed 64 byte line length.
1455 *
1456 * REVISIT per ARMv7, these may trigger watchpoints ...
1457 */
1458
1459 /* invalidate I-Cache */
1460 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
1461 {
1462 /* ICIMVAU - Invalidate Cache single entry
1463 * with MVA to PoU
1464 * MCR p15, 0, r0, c7, c5, 1
1465 */
1466 for (uint32_t cacheline = address;
1467 cacheline < address + size * count;
1468 cacheline += 64) {
1469 retval = dpm->instr_write_data_r0(dpm,
1470 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1471 cacheline);
1472 }
1473 }
1474
1475 /* invalidate D-Cache */
1476 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
1477 {
1478 /* DCIMVAC - Invalidate data Cache line
1479 * with MVA to PoC
1480 * MCR p15, 0, r0, c7, c6, 1
1481 */
1482 for (uint32_t cacheline = address;
1483 cacheline < address + size * count;
1484 cacheline += 64) {
1485 retval = dpm->instr_write_data_r0(dpm,
1486 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1487 cacheline);
1488 }
1489 }
1490
1491 /* (void) */ dpm->finish(dpm);
1492 }
1493
1494 return retval;
1495 }
1496
1497 static int cortex_a8_write_memory(struct target *target, uint32_t address,
1498 uint32_t size, uint32_t count, uint8_t *buffer)
1499 {
1500 int enabled = 0;
1501 uint32_t virt, phys;
1502 int retval;
1503
1504 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1505
1506 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1507 retval = cortex_a8_mmu(target, &enabled);
1508 if (retval != ERROR_OK)
1509 return retval;
1510 if(enabled)
1511 {
1512 virt = address;
1513 cortex_a8_virt2phys(target, virt, &phys);
1514 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1515 address = phys;
1516 }
1517
1518 return cortex_a8_write_phys_memory(target, address, size,
1519 count, buffer);
1520 }
1521
1522 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
1523 uint32_t count, uint8_t *buffer)
1524 {
1525 return cortex_a8_write_memory(target, address, 4, count, buffer);
1526 }
1527
1528
1529 static int cortex_a8_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1530 {
1531 #if 0
1532 u16 dcrdr;
1533
1534 mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1535 *ctrl = (uint8_t)dcrdr;
1536 *value = (uint8_t)(dcrdr >> 8);
1537
1538 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1539
1540 /* write ack back to software dcc register
1541 * signify we have read data */
1542 if (dcrdr & (1 << 0))
1543 {
1544 dcrdr = 0;
1545 mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1546 }
1547 #endif
1548 return ERROR_OK;
1549 }
1550
1551
1552 static int cortex_a8_handle_target_request(void *priv)
1553 {
1554 struct target *target = priv;
1555 struct armv7a_common *armv7a = target_to_armv7a(target);
1556 struct adiv5_dap *swjdp = &armv7a->dap;
1557
1558 if (!target_was_examined(target))
1559 return ERROR_OK;
1560 if (!target->dbg_msg_enabled)
1561 return ERROR_OK;
1562
1563 if (target->state == TARGET_RUNNING)
1564 {
1565 uint8_t data = 0;
1566 uint8_t ctrl = 0;
1567
1568 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1569
1570 /* check if we have data */
1571 if (ctrl & (1 << 0))
1572 {
1573 uint32_t request;
1574
1575 /* we assume target is quick enough */
1576 request = data;
1577 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1578 request |= (data << 8);
1579 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1580 request |= (data << 16);
1581 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1582 request |= (data << 24);
1583 target_request(target, request);
1584 }
1585 }
1586
1587 return ERROR_OK;
1588 }
1589
1590 /*
1591 * Cortex-A8 target information and configuration
1592 */
1593
1594 static int cortex_a8_examine_first(struct target *target)
1595 {
1596 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1597 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1598 struct adiv5_dap *swjdp = &armv7a->dap;
1599 int i;
1600 int retval = ERROR_OK;
1601 uint32_t didr, ctypr, ttypr, cpuid;
1602
1603 /* stop assuming this is an OMAP! */
1604 LOG_DEBUG("TODO - autoconfigure");
1605
1606 /* Here we shall insert a proper ROM Table scan */
1607 armv7a->debug_base = OMAP3530_DEBUG_BASE;
1608
1609 /* Instead of doing one extra read to ensure the DAP is configured,
1610 * we call ahbap_debugport_init(swjdp)
1611 */
1612 retval = ahbap_debugport_init(swjdp);
1613 if (retval != ERROR_OK)
1614 return retval;
1615
1616 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1617 if (retval != ERROR_OK)
1618 return retval;
1619
1620 if ((retval = mem_ap_read_atomic_u32(swjdp,
1621 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1622 {
1623 LOG_DEBUG("Examine %s failed", "CPUID");
1624 return retval;
1625 }
1626
1627 if ((retval = mem_ap_read_atomic_u32(swjdp,
1628 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1629 {
1630 LOG_DEBUG("Examine %s failed", "CTYPR");
1631 return retval;
1632 }
1633
1634 if ((retval = mem_ap_read_atomic_u32(swjdp,
1635 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1636 {
1637 LOG_DEBUG("Examine %s failed", "TTYPR");
1638 return retval;
1639 }
1640
1641 if ((retval = mem_ap_read_atomic_u32(swjdp,
1642 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1643 {
1644 LOG_DEBUG("Examine %s failed", "DIDR");
1645 return retval;
1646 }
1647
1648 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1649 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1650 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1651 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1652
1653 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
1654 retval = cortex_a8_dpm_setup(cortex_a8, didr);
1655 if (retval != ERROR_OK)
1656 return retval;
1657
1658 /* Setup Breakpoint Register Pairs */
1659 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
1660 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1661 cortex_a8->brp_num_available = cortex_a8->brp_num;
1662 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
1663 // cortex_a8->brb_enabled = ????;
1664 for (i = 0; i < cortex_a8->brp_num; i++)
1665 {
1666 cortex_a8->brp_list[i].used = 0;
1667 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
1668 cortex_a8->brp_list[i].type = BRP_NORMAL;
1669 else
1670 cortex_a8->brp_list[i].type = BRP_CONTEXT;
1671 cortex_a8->brp_list[i].value = 0;
1672 cortex_a8->brp_list[i].control = 0;
1673 cortex_a8->brp_list[i].BRPn = i;
1674 }
1675
1676 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
1677
1678 target_set_examined(target);
1679 return ERROR_OK;
1680 }
1681
1682 static int cortex_a8_examine(struct target *target)
1683 {
1684 int retval = ERROR_OK;
1685
1686 /* don't re-probe hardware after each reset */
1687 if (!target_was_examined(target))
1688 retval = cortex_a8_examine_first(target);
1689
1690 /* Configure core debug access */
1691 if (retval == ERROR_OK)
1692 retval = cortex_a8_init_debug_access(target);
1693
1694 return retval;
1695 }
1696
1697 /*
1698 * Cortex-A8 target creation and initialization
1699 */
1700
1701 static int cortex_a8_init_target(struct command_context *cmd_ctx,
1702 struct target *target)
1703 {
1704 /* examine_first() does a bunch of this */
1705 return ERROR_OK;
1706 }
1707
1708 static int cortex_a8_init_arch_info(struct target *target,
1709 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
1710 {
1711 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1712 struct arm *armv4_5 = &armv7a->armv4_5_common;
1713 struct adiv5_dap *dap = &armv7a->dap;
1714
1715 armv7a->armv4_5_common.dap = dap;
1716
1717 /* Setup struct cortex_a8_common */
1718 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
1719 armv4_5->arch_info = armv7a;
1720
1721 /* prepare JTAG information for the new target */
1722 cortex_a8->jtag_info.tap = tap;
1723 cortex_a8->jtag_info.scann_size = 4;
1724
1725 /* Leave (only) generic DAP stuff for debugport_init() */
1726 dap->jtag_info = &cortex_a8->jtag_info;
1727 dap->memaccess_tck = 80;
1728
1729 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
1730 dap->tar_autoincr_block = (1 << 10);
1731
1732 cortex_a8->fast_reg_read = 0;
1733
1734 /* Set default value */
1735 cortex_a8->current_address_mode = ARM_MODE_ANY;
1736
1737 /* register arch-specific functions */
1738 armv7a->examine_debug_reason = NULL;
1739
1740 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
1741
1742 armv7a->pre_restore_context = NULL;
1743 armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
1744 armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
1745 armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
1746 armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
1747 armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
1748 armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
1749 armv7a->armv4_5_mmu.has_tiny_pages = 1;
1750 armv7a->armv4_5_mmu.mmu_enabled = 0;
1751
1752
1753 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
1754
1755 /* REVISIT v7a setup should be in a v7a-specific routine */
1756 arm_init_arch_info(target, armv4_5);
1757 armv7a->common_magic = ARMV7_COMMON_MAGIC;
1758
1759 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
1760
1761 return ERROR_OK;
1762 }
1763
1764 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
1765 {
1766 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
1767
1768 cortex_a8_init_arch_info(target, cortex_a8, target->tap);
1769
1770 return ERROR_OK;
1771 }
1772
1773 static uint32_t cortex_a8_get_ttb(struct target *target)
1774 {
1775 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1776 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1777 uint32_t ttb = 0, retval = ERROR_OK;
1778
1779 /* current_address_mode is set inside cortex_a8_virt2phys(),
1780 where we can determine whether the address belongs to user or kernel space */
1781 if(cortex_a8->current_address_mode == ARM_MODE_SVC)
1782 {
1783 /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
1784 retval = armv7a->armv4_5_common.mrc(target, 15,
1785 0, 1, /* op1, op2 */
1786 2, 0, /* CRn, CRm */
1787 &ttb);
1788 }
1789 else if(cortex_a8->current_address_mode == ARM_MODE_USR)
1790 {
1791 /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
1792 retval = armv7a->armv4_5_common.mrc(target, 15,
1793 0, 0, /* op1, op2 */
1794 2, 0, /* CRn, CRm */
1795 &ttb);
1796 }
1797 /* We don't know whether the address is a user or a kernel one;
1798 we assume that if we are in kernel (SVC) mode the
1799 address belongs to kernel space, and if in user mode
1800 - to user space */
1801 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
1802 {
1803 /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
1804 retval = armv7a->armv4_5_common.mrc(target, 15,
1805 0, 1, /* op1, op2 */
1806 2, 0, /* CRn, CRm */
1807 &ttb);
1808 }
1809 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
1810 {
1811 /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
1812 retval = armv7a->armv4_5_common.mrc(target, 15,
1813 0, 0, /* op1, op2 */
1814 2, 0, /* CRn, CRm */
1815 &ttb);
1816 }
1817 /* otherwise we don't know which TTB to use: user or kernel */
1818 else
1819 LOG_ERROR("Don't know how to get ttb for current mode!!!");
1820
1821 ttb &= 0xffffc000;
1822
1823 return ttb;
1824 }
1825
1826 static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
1827 int d_u_cache, int i_cache)
1828 {
1829 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1830 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1831 uint32_t cp15_control;
1832
1833 /* read cp15 control register */
1834 armv7a->armv4_5_common.mrc(target, 15,
1835 0, 0, /* op1, op2 */
1836 1, 0, /* CRn, CRm */
1837 &cp15_control);
1838
1839
1840 if (mmu)
1841 cp15_control &= ~0x1U;
1842
1843 if (d_u_cache)
1844 cp15_control &= ~0x4U;
1845
1846 if (i_cache)
1847 cp15_control &= ~0x1000U;
1848
1849 armv7a->armv4_5_common.mcr(target, 15,
1850 0, 0, /* op1, op2 */
1851 1, 0, /* CRn, CRm */
1852 cp15_control);
1853 }
1854
1855 static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
1856 int d_u_cache, int i_cache)
1857 {
1858 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1859 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1860 uint32_t cp15_control;
1861
1862 /* read cp15 control register */
1863 armv7a->armv4_5_common.mrc(target, 15,
1864 0, 0, /* op1, op2 */
1865 1, 0, /* CRn, CRm */
1866 &cp15_control);
1867
1868 if (mmu)
1869 cp15_control |= 0x1U;
1870
1871 if (d_u_cache)
1872 cp15_control |= 0x4U;
1873
1874 if (i_cache)
1875 cp15_control |= 0x1000U;
1876
1877 armv7a->armv4_5_common.mcr(target, 15,
1878 0, 0, /* op1, op2 */
1879 1, 0, /* CRn, CRm */
1880 cp15_control);
1881 }
1882
1883
1884 static int cortex_a8_mmu(struct target *target, int *enabled)
1885 {
1886 if (target->state != TARGET_HALTED) {
1887 LOG_ERROR("%s: target not halted", __func__);
1888 return ERROR_TARGET_INVALID;
1889 }
1890
1891 *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
1892 return ERROR_OK;
1893 }
1894
1895 static int cortex_a8_virt2phys(struct target *target,
1896 uint32_t virt, uint32_t *phys)
1897 {
1898 uint32_t cb;
1899 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1900 // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1901 struct armv7a_common *armv7a = target_to_armv7a(target);
1902
1903 /* We assume that the virtual address space is split
1904 between user and kernel in the Linux style:
1905 0x00000000-0xbfffffff - User space
1906 0xc0000000-0xffffffff - Kernel space */
1907 if( virt < 0xc0000000 ) /* Linux user space */
1908 cortex_a8->current_address_mode = ARM_MODE_USR;
1909 else /* Linux kernel */
1910 cortex_a8->current_address_mode = ARM_MODE_SVC;
1911 uint32_t ret;
1912 int retval = armv4_5_mmu_translate_va(target,
1913 &armv7a->armv4_5_mmu, virt, &cb, &ret);
1914 if (retval != ERROR_OK)
1915 return retval;
1916 /* Reset the flag. We don't want someone else to use it by mistake */
1917 cortex_a8->current_address_mode = ARM_MODE_ANY;
1918
1919 *phys = ret;
1920 return ERROR_OK;
1921 }
1922
1923 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
1924 {
1925 struct target *target = get_current_target(CMD_CTX);
1926 struct armv7a_common *armv7a = target_to_armv7a(target);
1927
1928 return armv4_5_handle_cache_info_command(CMD_CTX,
1929 &armv7a->armv4_5_mmu.armv4_5_cache);
1930 }
1931
1932
1933 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
1934 {
1935 struct target *target = get_current_target(CMD_CTX);
1936 if (!target_was_examined(target))
1937 {
1938 LOG_ERROR("target not examined yet");
1939 return ERROR_FAIL;
1940 }
1941
1942 return cortex_a8_init_debug_access(target);
1943 }
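/* Typically wired up from a board/target config file, for example
 * (illustrative only):
 *
 *	$_TARGETNAME configure -event reset-init { cortex_a8 dbginit }
 */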
1944
1945 static const struct command_registration cortex_a8_exec_command_handlers[] = {
1946 {
1947 .name = "cache_info",
1948 .handler = cortex_a8_handle_cache_info_command,
1949 .mode = COMMAND_EXEC,
1950 .help = "display information about target caches",
1951 },
1952 {
1953 .name = "dbginit",
1954 .handler = cortex_a8_handle_dbginit_command,
1955 .mode = COMMAND_EXEC,
1956 .help = "Initialize core debug",
1957 },
1958 COMMAND_REGISTRATION_DONE
1959 };
1960 static const struct command_registration cortex_a8_command_handlers[] = {
1961 {
1962 .chain = arm_command_handlers,
1963 },
1964 {
1965 .chain = armv7a_command_handlers,
1966 },
1967 {
1968 .name = "cortex_a8",
1969 .mode = COMMAND_ANY,
1970 .help = "Cortex-A8 command group",
1971 .chain = cortex_a8_exec_command_handlers,
1972 },
1973 COMMAND_REGISTRATION_DONE
1974 };
1975
1976 struct target_type cortexa8_target = {
1977 .name = "cortex_a8",
1978
1979 .poll = cortex_a8_poll,
1980 .arch_state = armv7a_arch_state,
1981
1982 .target_request_data = NULL,
1983
1984 .halt = cortex_a8_halt,
1985 .resume = cortex_a8_resume,
1986 .step = cortex_a8_step,
1987
1988 .assert_reset = cortex_a8_assert_reset,
1989 .deassert_reset = cortex_a8_deassert_reset,
1990 .soft_reset_halt = NULL,
1991
1992 /* REVISIT allow exporting VFP3 registers ... */
1993 .get_gdb_reg_list = arm_get_gdb_reg_list,
1994
1995 .read_memory = cortex_a8_read_memory,
1996 .write_memory = cortex_a8_write_memory,
1997 .bulk_write_memory = cortex_a8_bulk_write_memory,
1998
1999 .checksum_memory = arm_checksum_memory,
2000 .blank_check_memory = arm_blank_check_memory,
2001
2002 .run_algorithm = armv4_5_run_algorithm,
2003
2004 .add_breakpoint = cortex_a8_add_breakpoint,
2005 .remove_breakpoint = cortex_a8_remove_breakpoint,
2006 .add_watchpoint = NULL,
2007 .remove_watchpoint = NULL,
2008
2009 .commands = cortex_a8_command_handlers,
2010 .target_create = cortex_a8_target_create,
2011 .init_target = cortex_a8_init_target,
2012 .examine = cortex_a8_examine,
2013
2014 .read_phys_memory = cortex_a8_read_phys_memory,
2015 .write_phys_memory = cortex_a8_write_phys_memory,
2016 .mmu = cortex_a8_mmu,
2017 .virt2phys = cortex_a8_virt2phys,
2018
2019 };
