cortex a8: mem_ap_read_buf_u32() error handling
[openocd.git] / src / target / cortex_a8.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * This program is free software; you can redistribute it and/or modify *
18 * it under the terms of the GNU General Public License as published by *
19 * the Free Software Foundation; either version 2 of the License, or *
20 * (at your option) any later version. *
21 * *
22 * This program is distributed in the hope that it will be useful, *
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
25 * GNU General Public License for more details. *
26 * *
27 * You should have received a copy of the GNU General Public License *
28 * along with this program; if not, write to the *
29 * Free Software Foundation, Inc., *
30 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
31 * *
32 * Cortex-A8(tm) TRM, ARM DDI 0344H *
33 * *
34 ***************************************************************************/
35 #ifdef HAVE_CONFIG_H
36 #include "config.h"
37 #endif
38
39 #include "breakpoints.h"
40 #include "cortex_a8.h"
41 #include "register.h"
42 #include "target_request.h"
43 #include "target_type.h"
44 #include "arm_opcodes.h"
45 #include <helper/time_support.h>
46
47 static int cortex_a8_poll(struct target *target);
48 static int cortex_a8_debug_entry(struct target *target);
49 static int cortex_a8_restore_context(struct target *target, bool bpwp);
50 static int cortex_a8_set_breakpoint(struct target *target,
51 struct breakpoint *breakpoint, uint8_t matchmode);
52 static int cortex_a8_unset_breakpoint(struct target *target,
53 struct breakpoint *breakpoint);
54 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
55 uint32_t *value, int regnum);
56 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
57 uint32_t value, int regnum);
58 static int cortex_a8_mmu(struct target *target, int *enabled);
59 static int cortex_a8_virt2phys(struct target *target,
60 uint32_t virt, uint32_t *phys);
61 static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
62 int d_u_cache, int i_cache);
63 static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
64 int d_u_cache, int i_cache);
65 static uint32_t cortex_a8_get_ttb(struct target *target);
66
67
68 /*
69 * FIXME do topology discovery using the ROM; don't
70 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
71 * cores, with different AP numbering ... don't use a #define
72 * for these numbers, use per-core armv7a state.
73 */
74 #define swjdp_memoryap 0
75 #define swjdp_debugap 1
76 #define OMAP3530_DEBUG_BASE 0x54011000
77
78 /*
79 * Cortex-A8 Basic debug access, very low level assumes state is saved
80 */
81 static int cortex_a8_init_debug_access(struct target *target)
82 {
83 struct armv7a_common *armv7a = target_to_armv7a(target);
84 struct adiv5_dap *swjdp = &armv7a->dap;
85
86 int retval;
87 uint32_t dummy;
88
89 LOG_DEBUG(" ");
90
91 /* Unlocking the debug registers for modification */
92 /* The debugport might be uninitialised so try twice */
93 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
94 if (retval != ERROR_OK)
95 {
96 /* try again */
97 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
98 if (retval == ERROR_OK)
99 {
			LOG_USER("Unlocking debug access failed on the first try, but succeeded on the second try.");
101 }
102 }
103 if (retval != ERROR_OK)
104 return retval;
105 /* Clear Sticky Power Down status Bit in PRSR to enable access to
106 the registers in the Core Power Domain */
107 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_PRSR, &dummy);
108 if (retval != ERROR_OK)
109 return retval;
110
111 /* Enabling of instruction execution in debug mode is done in debug_entry code */
112
113 /* Resync breakpoint registers */
114
115 /* Since this is likely called from init or reset, update target state information*/
116 retval = cortex_a8_poll(target);
117
118 return retval;
119 }
120
121 /* To reduce needless round-trips, pass in a pointer to the current
122 * DSCR value. Initialize it to zero if you just need to know the
123 * value on return from this function; or DSCR_INSTR_COMP if you
124 * happen to know that no instruction is pending.
125 */
126 static int cortex_a8_exec_opcode(struct target *target,
127 uint32_t opcode, uint32_t *dscr_p)
128 {
129 uint32_t dscr;
130 int retval;
131 struct armv7a_common *armv7a = target_to_armv7a(target);
132 struct adiv5_dap *swjdp = &armv7a->dap;
133
134 dscr = dscr_p ? *dscr_p : 0;
135
136 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
137
138 /* Wait for InstrCompl bit to be set */
139 while ((dscr & DSCR_INSTR_COMP) == 0)
140 {
141 retval = mem_ap_read_atomic_u32(swjdp,
142 armv7a->debug_base + CPUDBG_DSCR, &dscr);
143 if (retval != ERROR_OK)
144 {
145 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
146 return retval;
147 }
148 }
149
150 retval = mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);
151 if (retval != ERROR_OK)
152 return retval;
153
154 do
155 {
156 retval = mem_ap_read_atomic_u32(swjdp,
157 armv7a->debug_base + CPUDBG_DSCR, &dscr);
158 if (retval != ERROR_OK)
159 {
160 LOG_ERROR("Could not read DSCR register");
161 return retval;
162 }
163 }
164 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
165
166 if (dscr_p)
167 *dscr_p = dscr;
168
169 return retval;
170 }
171
172 /**************************************************************************
Read the core register file with very few exec_opcode calls; fast, but needs a work_area.
This can cause problems with the MMU active.
175 **************************************************************************/
176 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
177 uint32_t * regfile)
178 {
179 int retval = ERROR_OK;
180 struct armv7a_common *armv7a = target_to_armv7a(target);
181 struct adiv5_dap *swjdp = &armv7a->dap;
182
183 retval = cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
184 if (retval != ERROR_OK)
185 return retval;
186 retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
187 if (retval != ERROR_OK)
188 return retval;
189 retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
190 if (retval != ERROR_OK)
191 return retval;
192
193 dap_ap_select(swjdp, swjdp_memoryap);
194 retval = mem_ap_read_buf_u32(swjdp, (uint8_t *)(&regfile[1]), 4*15, address);
195 if (retval != ERROR_OK)
196 return retval;
197 dap_ap_select(swjdp, swjdp_debugap);
198
199 return retval;
200 }
201
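/* Read one core register (r0-r14, PC, CPSR or SPSR) through the DCC:
 * the core is made to copy the register into DTRTX with an MCR p14
 * (going through r0 for the PC and the status registers), then the
 * value is fetched over the AP once DSCR reports DTRTXfull. */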
202 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
203 uint32_t *value, int regnum)
204 {
205 int retval = ERROR_OK;
206 uint8_t reg = regnum&0xFF;
207 uint32_t dscr = 0;
208 struct armv7a_common *armv7a = target_to_armv7a(target);
209 struct adiv5_dap *swjdp = &armv7a->dap;
210
211 if (reg > 17)
212 return retval;
213
214 if (reg < 15)
215 {
216 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
217 retval = cortex_a8_exec_opcode(target,
218 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
219 &dscr);
220 if (retval != ERROR_OK)
221 return retval;
222 }
223 else if (reg == 15)
224 {
225 /* "MOV r0, r15"; then move r0 to DCCTX */
226 retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
227 if (retval != ERROR_OK)
228 return retval;
229 retval = cortex_a8_exec_opcode(target,
230 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
231 &dscr);
232 if (retval != ERROR_OK)
233 return retval;
234 }
235 else
236 {
237 /* "MRS r0, CPSR" or "MRS r0, SPSR"
238 * then move r0 to DCCTX
239 */
240 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
241 if (retval != ERROR_OK)
242 return retval;
243 retval = cortex_a8_exec_opcode(target,
244 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
245 &dscr);
246 if (retval != ERROR_OK)
247 return retval;
248 }
249
	/* Wait for DTRTXfull, then read DTRTX */
251 while ((dscr & DSCR_DTR_TX_FULL) == 0)
252 {
253 retval = mem_ap_read_atomic_u32(swjdp,
254 armv7a->debug_base + CPUDBG_DSCR, &dscr);
255 if (retval != ERROR_OK)
256 return retval;
257 }
258
259 retval = mem_ap_read_atomic_u32(swjdp,
260 armv7a->debug_base + CPUDBG_DTRTX, value);
261 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
262
263 return retval;
264 }
265
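/* Write one core register (r0-r14, PC, CPSR or SPSR) through the DCC:
 * the value is placed in DTRRX over the AP, then the core picks it up
 * with an MRC p14 (going through r0 and a MOV/MSR for the PC and the
 * status registers, with a prefetch flush after CPSR updates). */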
266 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
267 uint32_t value, int regnum)
268 {
269 int retval = ERROR_OK;
270 uint8_t Rd = regnum&0xFF;
271 uint32_t dscr;
272 struct armv7a_common *armv7a = target_to_armv7a(target);
273 struct adiv5_dap *swjdp = &armv7a->dap;
274
275 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
276
277 /* Check that DCCRX is not full */
278 retval = mem_ap_read_atomic_u32(swjdp,
279 armv7a->debug_base + CPUDBG_DSCR, &dscr);
280 if (retval != ERROR_OK)
281 return retval;
282 if (dscr & DSCR_DTR_RX_FULL)
283 {
284 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC p14, 0, R0, c0, c5, 0 (opcode 0xEE100E15) */
286 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
287 &dscr);
288 if (retval != ERROR_OK)
289 return retval;
290 }
291
292 if (Rd > 17)
293 return retval;
294
295 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
296 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
297 retval = mem_ap_write_u32(swjdp,
298 armv7a->debug_base + CPUDBG_DTRRX, value);
299 if (retval != ERROR_OK)
300 return retval;
301
302 if (Rd < 15)
303 {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
305 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
306 &dscr);
307 if (retval != ERROR_OK)
308 return retval;
309 }
310 else if (Rd == 15)
311 {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
313 * then "mov r15, r0"
314 */
315 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
316 &dscr);
317 if (retval != ERROR_OK)
318 return retval;
319 retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
320 if (retval != ERROR_OK)
321 return retval;
322 }
323 else
324 {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
326 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
327 */
328 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
329 &dscr);
330 if (retval != ERROR_OK)
331 return retval;
332 retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
333 &dscr);
334 if (retval != ERROR_OK)
335 return retval;
336
337 /* "Prefetch flush" after modifying execution status in CPSR */
338 if (Rd == 16)
339 {
340 retval = cortex_a8_exec_opcode(target,
341 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
342 &dscr);
343 if (retval != ERROR_OK)
344 return retval;
345 }
346 }
347
348 return retval;
349 }
350
351 /* Write to memory mapped registers directly with no cache or mmu handling */
352 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
353 {
354 int retval;
355 struct armv7a_common *armv7a = target_to_armv7a(target);
356 struct adiv5_dap *swjdp = &armv7a->dap;
357
358 retval = mem_ap_write_atomic_u32(swjdp, address, value);
359
360 return retval;
361 }
362
363 /*
364 * Cortex-A8 implementation of Debug Programmer's Model
365 *
366 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
367 * so there's no need to poll for it before executing an instruction.
368 *
369 * NOTE that in several of these cases the "stall" mode might be useful.
370 * It'd let us queue a few operations together... prepare/finish might
371 * be the places to enable/disable that mode.
372 */
373
374 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
375 {
376 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
377 }
378
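/* Low-level DCC helpers used by the DPM callbacks below: write a word
 * into DTRRX for the core to read, or wait for DTRTXfull and read back
 * the word the core has written to DTRTX. */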
379 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
380 {
381 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
382 return mem_ap_write_u32(&a8->armv7a_common.dap,
383 a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
384 }
385
386 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
387 uint32_t *dscr_p)
388 {
389 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
390 uint32_t dscr = DSCR_INSTR_COMP;
391 int retval;
392
393 if (dscr_p)
394 dscr = *dscr_p;
395
	/* Wait for DTRTXfull */
397 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
398 retval = mem_ap_read_atomic_u32(swjdp,
399 a8->armv7a_common.debug_base + CPUDBG_DSCR,
400 &dscr);
401 if (retval != ERROR_OK)
402 return retval;
403 }
404
405 retval = mem_ap_read_atomic_u32(swjdp,
406 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
407 if (retval != ERROR_OK)
408 return retval;
409 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
410
411 if (dscr_p)
412 *dscr_p = dscr;
413
414 return retval;
415 }
416
417 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
418 {
419 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
420 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
421 uint32_t dscr;
422 int retval;
423
	/* set up invariant: INSTR_COMP is set after every DPM operation */
425 long long then = timeval_ms();
426 for (;;)
427 {
428 retval = mem_ap_read_atomic_u32(swjdp,
429 a8->armv7a_common.debug_base + CPUDBG_DSCR,
430 &dscr);
431 if (retval != ERROR_OK)
432 return retval;
433 if ((dscr & DSCR_INSTR_COMP) != 0)
434 break;
435 if (timeval_ms() > then + 1000)
436 {
437 LOG_ERROR("Timeout waiting for dpm prepare");
438 return ERROR_FAIL;
439 }
440 }
441
442 /* this "should never happen" ... */
443 if (dscr & DSCR_DTR_RX_FULL) {
444 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
445 /* Clear DCCRX */
446 retval = cortex_a8_exec_opcode(
447 a8->armv7a_common.armv4_5_common.target,
448 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
449 &dscr);
450 if (retval != ERROR_OK)
451 return retval;
452 }
453
454 return retval;
455 }
456
457 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
458 {
459 /* REVISIT what could be done here? */
460 return ERROR_OK;
461 }
462
463 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
464 uint32_t opcode, uint32_t data)
465 {
466 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
467 int retval;
468 uint32_t dscr = DSCR_INSTR_COMP;
469
	retval = cortex_a8_write_dcc(a8, data);
	if (retval != ERROR_OK)
		return retval;

472 return cortex_a8_exec_opcode(
473 a8->armv7a_common.armv4_5_common.target,
474 opcode,
475 &dscr);
476 }
477
478 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
479 uint32_t opcode, uint32_t data)
480 {
481 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
482 uint32_t dscr = DSCR_INSTR_COMP;
483 int retval;
484
	retval = cortex_a8_write_dcc(a8, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
488 retval = cortex_a8_exec_opcode(
489 a8->armv7a_common.armv4_5_common.target,
490 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
491 &dscr);
492 if (retval != ERROR_OK)
493 return retval;
494
495 /* then the opcode, taking data from R0 */
496 retval = cortex_a8_exec_opcode(
497 a8->armv7a_common.armv4_5_common.target,
498 opcode,
499 &dscr);
500
501 return retval;
502 }
503
504 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
505 {
506 struct target *target = dpm->arm->target;
507 uint32_t dscr = DSCR_INSTR_COMP;
508
509 /* "Prefetch flush" after modifying execution status in CPSR */
510 return cortex_a8_exec_opcode(target,
511 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
512 &dscr);
513 }
514
515 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
516 uint32_t opcode, uint32_t *data)
517 {
518 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
519 int retval;
520 uint32_t dscr = DSCR_INSTR_COMP;
521
522 /* the opcode, writing data to DCC */
523 retval = cortex_a8_exec_opcode(
524 a8->armv7a_common.armv4_5_common.target,
525 opcode,
526 &dscr);
527 if (retval != ERROR_OK)
528 return retval;
529
530 return cortex_a8_read_dcc(a8, data, &dscr);
531 }
532
533
534 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
535 uint32_t opcode, uint32_t *data)
536 {
537 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
538 uint32_t dscr = DSCR_INSTR_COMP;
539 int retval;
540
541 /* the opcode, writing data to R0 */
542 retval = cortex_a8_exec_opcode(
543 a8->armv7a_common.armv4_5_common.target,
544 opcode,
545 &dscr);
546 if (retval != ERROR_OK)
547 return retval;
548
549 /* write R0 to DCC */
550 retval = cortex_a8_exec_opcode(
551 a8->armv7a_common.armv4_5_common.target,
552 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
553 &dscr);
554 if (retval != ERROR_OK)
555 return retval;
556
557 return cortex_a8_read_dcc(a8, data, &dscr);
558 }
559
560 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
561 uint32_t addr, uint32_t control)
562 {
563 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
564 uint32_t vr = a8->armv7a_common.debug_base;
565 uint32_t cr = a8->armv7a_common.debug_base;
566 int retval;
567
568 switch (index_t) {
569 case 0 ... 15: /* breakpoints */
570 vr += CPUDBG_BVR_BASE;
571 cr += CPUDBG_BCR_BASE;
572 break;
573 case 16 ... 31: /* watchpoints */
574 vr += CPUDBG_WVR_BASE;
575 cr += CPUDBG_WCR_BASE;
576 index_t -= 16;
577 break;
578 default:
579 return ERROR_FAIL;
580 }
581 vr += 4 * index_t;
582 cr += 4 * index_t;
583
584 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
585 (unsigned) vr, (unsigned) cr);
586
587 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
588 vr, addr);
589 if (retval != ERROR_OK)
590 return retval;
591 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
592 cr, control);
593 return retval;
594 }
595
596 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
597 {
598 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
599 uint32_t cr;
600
601 switch (index_t) {
602 case 0 ... 15:
603 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
604 break;
605 case 16 ... 31:
606 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
607 index_t -= 16;
608 break;
609 default:
610 return ERROR_FAIL;
611 }
612 cr += 4 * index_t;
613
614 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
615
616 /* clear control register */
617 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
618 }
619
620 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
621 {
622 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
623 int retval;
624
625 dpm->arm = &a8->armv7a_common.armv4_5_common;
626 dpm->didr = didr;
627
628 dpm->prepare = cortex_a8_dpm_prepare;
629 dpm->finish = cortex_a8_dpm_finish;
630
631 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
632 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
633 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
634
635 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
636 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
637
638 dpm->bpwp_enable = cortex_a8_bpwp_enable;
639 dpm->bpwp_disable = cortex_a8_bpwp_disable;
640
641 retval = arm_dpm_setup(dpm);
642 if (retval == ERROR_OK)
643 retval = arm_dpm_initialize(dpm);
644
645 return retval;
646 }
647
648
649 /*
650 * Cortex-A8 Run control
651 */
652
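/* Sample DSCR over the debug AP and update target->state accordingly,
 * running debug entry (and firing the halt callbacks) the first time a
 * halting debug event is seen. */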
653 static int cortex_a8_poll(struct target *target)
654 {
655 int retval = ERROR_OK;
656 uint32_t dscr;
657 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
658 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
659 struct adiv5_dap *swjdp = &armv7a->dap;
660 enum target_state prev_target_state = target->state;
661 uint8_t saved_apsel = dap_ap_get_select(swjdp);
662
663 dap_ap_select(swjdp, swjdp_debugap);
664 retval = mem_ap_read_atomic_u32(swjdp,
665 armv7a->debug_base + CPUDBG_DSCR, &dscr);
666 if (retval != ERROR_OK)
667 {
668 dap_ap_select(swjdp, saved_apsel);
669 return retval;
670 }
671 cortex_a8->cpudbg_dscr = dscr;
672
673 if ((dscr & 0x3) == 0x3)
674 {
675 if (prev_target_state != TARGET_HALTED)
676 {
677 /* We have a halting debug event */
678 LOG_DEBUG("Target halted");
679 target->state = TARGET_HALTED;
680 if ((prev_target_state == TARGET_RUNNING)
681 || (prev_target_state == TARGET_RESET))
682 {
683 retval = cortex_a8_debug_entry(target);
684 if (retval != ERROR_OK)
685 return retval;
686
687 target_call_event_callbacks(target,
688 TARGET_EVENT_HALTED);
689 }
690 if (prev_target_state == TARGET_DEBUG_RUNNING)
691 {
692 LOG_DEBUG(" ");
693
694 retval = cortex_a8_debug_entry(target);
695 if (retval != ERROR_OK)
696 return retval;
697
698 target_call_event_callbacks(target,
699 TARGET_EVENT_DEBUG_HALTED);
700 }
701 }
702 }
703 else if ((dscr & 0x3) == 0x2)
704 {
705 target->state = TARGET_RUNNING;
706 }
707 else
708 {
709 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
710 target->state = TARGET_UNKNOWN;
711 }
712
713 dap_ap_select(swjdp, saved_apsel);
714
715 return retval;
716 }
717
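/* Request a halt by writing DRCR, enable halting debug mode in DSCR,
 * then wait up to one second for DSCR to report the core as halted. */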
718 static int cortex_a8_halt(struct target *target)
719 {
720 int retval = ERROR_OK;
721 uint32_t dscr;
722 struct armv7a_common *armv7a = target_to_armv7a(target);
723 struct adiv5_dap *swjdp = &armv7a->dap;
724 uint8_t saved_apsel = dap_ap_get_select(swjdp);
725 dap_ap_select(swjdp, swjdp_debugap);
726
727 /*
728 * Tell the core to be halted by writing DRCR with 0x1
729 * and then wait for the core to be halted.
730 */
731 retval = mem_ap_write_atomic_u32(swjdp,
732 armv7a->debug_base + CPUDBG_DRCR, 0x1);
733 if (retval != ERROR_OK)
734 goto out;
735
736 /*
737 * enter halting debug mode
738 */
739 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
740 if (retval != ERROR_OK)
741 goto out;
742
743 retval = mem_ap_write_atomic_u32(swjdp,
744 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
745 if (retval != ERROR_OK)
746 goto out;
747
748 long long then = timeval_ms();
749 for (;;)
750 {
751 retval = mem_ap_read_atomic_u32(swjdp,
752 armv7a->debug_base + CPUDBG_DSCR, &dscr);
753 if (retval != ERROR_OK)
754 goto out;
755 if ((dscr & DSCR_CORE_HALTED) != 0)
756 {
757 break;
758 }
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for halt");
			retval = ERROR_FAIL;
			goto out;
		}
764 }
765
766 target->debug_reason = DBG_REASON_DBGRQ;
767
768 out:
769 dap_ap_select(swjdp, saved_apsel);
770 return retval;
771 }
772
773 static int cortex_a8_resume(struct target *target, int current,
774 uint32_t address, int handle_breakpoints, int debug_execution)
775 {
776 struct armv7a_common *armv7a = target_to_armv7a(target);
777 struct arm *armv4_5 = &armv7a->armv4_5_common;
778 struct adiv5_dap *swjdp = &armv7a->dap;
779 int retval;
780
781 // struct breakpoint *breakpoint = NULL;
782 uint32_t resume_pc, dscr;
783
784 uint8_t saved_apsel = dap_ap_get_select(swjdp);
785 dap_ap_select(swjdp, swjdp_debugap);
786
787 if (!debug_execution)
788 target_free_all_working_areas(target);
789
790 #if 0
791 if (debug_execution)
792 {
793 /* Disable interrupts */
794 /* We disable interrupts in the PRIMASK register instead of
795 * masking with C_MASKINTS,
796 * This is probably the same issue as Cortex-M3 Errata 377493:
797 * C_MASKINTS in parallel with disabled interrupts can cause
798 * local faults to not be taken. */
799 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
800 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
801 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
802
803 /* Make sure we are in Thumb mode */
804 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
805 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
806 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
807 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
808 }
809 #endif
810
811 /* current = 1: continue on current pc, otherwise continue at <address> */
812 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
813 if (!current)
814 resume_pc = address;
815
	/* Make sure that the ARMv7 GDB Thumb fixups do not
	 * kill the return address
	 */
819 switch (armv4_5->core_state)
820 {
821 case ARM_STATE_ARM:
822 resume_pc &= 0xFFFFFFFC;
823 break;
824 case ARM_STATE_THUMB:
825 case ARM_STATE_THUMB_EE:
826 /* When the return address is loaded into PC
827 * bit 0 must be 1 to stay in Thumb state
828 */
829 resume_pc |= 0x1;
830 break;
831 case ARM_STATE_JAZELLE:
832 LOG_ERROR("How do I resume into Jazelle state??");
833 return ERROR_FAIL;
834 }
835 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
836 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
837 armv4_5->pc->dirty = 1;
838 armv4_5->pc->valid = 1;
839
840 cortex_a8_restore_context(target, handle_breakpoints);
841
842 #if 0
843 /* the front-end may request us not to handle breakpoints */
844 if (handle_breakpoints)
845 {
846 /* Single step past breakpoint at current address */
847 if ((breakpoint = breakpoint_find(target, resume_pc)))
848 {
849 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
850 cortex_m3_unset_breakpoint(target, breakpoint);
851 cortex_m3_single_step_core(target);
852 cortex_m3_set_breakpoint(target, breakpoint);
853 }
854 }
855
856 #endif
857 /* Restart core and wait for it to be started
858 * NOTE: this clears DSCR_ITR_EN and other bits.
859 *
860 * REVISIT: for single stepping, we probably want to
861 * disable IRQs by default, with optional override...
862 */
863 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR, 0x2);
864 if (retval != ERROR_OK)
865 return retval;
866
867 long long then = timeval_ms();
868 for (;;)
869 {
870 retval = mem_ap_read_atomic_u32(swjdp,
871 armv7a->debug_base + CPUDBG_DSCR, &dscr);
872 if (retval != ERROR_OK)
873 return retval;
874 if ((dscr & DSCR_CORE_RESTARTED) != 0)
875 break;
876 if (timeval_ms() > then + 1000)
877 {
878 LOG_ERROR("Timeout waiting for resume");
879 return ERROR_FAIL;
880 }
881 }
882
883 target->debug_reason = DBG_REASON_NOTHALTED;
884 target->state = TARGET_RUNNING;
885
886 /* registers are now invalid */
887 register_cache_invalidate(armv4_5->core_cache);
888
889 if (!debug_execution)
890 {
891 target->state = TARGET_RUNNING;
892 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
893 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
894 }
895 else
896 {
897 target->state = TARGET_DEBUG_RUNNING;
898 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
899 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
900 }
901
902 dap_ap_select(swjdp, saved_apsel);
903
904 return ERROR_OK;
905 }
906
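/* Runs right after the core halts: enable ITR execution in DSCR, record
 * the debug reason (and WFAR for watchpoints), then refresh the register
 * cache either through the DPM or, with fast_reg_read set, via a work
 * area dump of r0-r15. */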
907 static int cortex_a8_debug_entry(struct target *target)
908 {
909 int i;
910 uint32_t regfile[16], cpsr, dscr;
911 int retval = ERROR_OK;
912 struct working_area *regfile_working_area = NULL;
913 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
914 struct armv7a_common *armv7a = target_to_armv7a(target);
915 struct arm *armv4_5 = &armv7a->armv4_5_common;
916 struct adiv5_dap *swjdp = &armv7a->dap;
917 struct reg *reg;
918
919 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
920
921 /* REVISIT surely we should not re-read DSCR !! */
922 retval = mem_ap_read_atomic_u32(swjdp,
923 armv7a->debug_base + CPUDBG_DSCR, &dscr);
924 if (retval != ERROR_OK)
925 return retval;
926
927 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
928 * imprecise data aborts get discarded by issuing a Data
929 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
930 */
931
932 /* Enable the ITR execution once we are in debug mode */
933 dscr |= DSCR_ITR_EN;
934 retval = mem_ap_write_atomic_u32(swjdp,
935 armv7a->debug_base + CPUDBG_DSCR, dscr);
936 if (retval != ERROR_OK)
937 return retval;
938
939 /* Examine debug reason */
940 arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);
941
942 /* save address of instruction that triggered the watchpoint? */
943 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
944 uint32_t wfar;
945
946 retval = mem_ap_read_atomic_u32(swjdp,
947 armv7a->debug_base + CPUDBG_WFAR,
948 &wfar);
949 if (retval != ERROR_OK)
950 return retval;
951 arm_dpm_report_wfar(&armv7a->dpm, wfar);
952 }
953
954 /* REVISIT fast_reg_read is never set ... */
955
956 /* Examine target state and mode */
957 if (cortex_a8->fast_reg_read)
958 target_alloc_working_area(target, 64, &regfile_working_area);
959
	/* First, load the registers accessible through the core debug port */
961 if (!regfile_working_area)
962 {
963 retval = arm_dpm_read_current_registers(&armv7a->dpm);
964 }
965 else
966 {
967 dap_ap_select(swjdp, swjdp_memoryap);
		retval = cortex_a8_read_regs_through_mem(target,
				regfile_working_area->address, regfile);
		dap_ap_select(swjdp, swjdp_memoryap);
		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
			return retval;
972
973 /* read Current PSR */
974 retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
975 if (retval != ERROR_OK)
976 return retval;
977 dap_ap_select(swjdp, swjdp_debugap);
978 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
979
980 arm_set_cpsr(armv4_5, cpsr);
981
982 /* update cache */
983 for (i = 0; i <= ARM_PC; i++)
984 {
985 reg = arm_reg_current(armv4_5, i);
986
987 buf_set_u32(reg->value, 0, 32, regfile[i]);
988 reg->valid = 1;
989 reg->dirty = 0;
990 }
991
992 /* Fixup PC Resume Address */
993 if (cpsr & (1 << 5))
994 {
995 // T bit set for Thumb or ThumbEE state
996 regfile[ARM_PC] -= 4;
997 }
998 else
999 {
1000 // ARM state
1001 regfile[ARM_PC] -= 8;
1002 }
1003
1004 reg = armv4_5->pc;
1005 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
1006 reg->dirty = reg->valid;
1007 }
1008
1009 #if 0
1010 /* TODO, Move this */
1011 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1012 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1013 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1014
1015 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1016 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1017
1018 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1019 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1020 #endif
1021
1022 /* Are we in an exception handler */
1023 // armv4_5->exception_number = 0;
1024 if (armv7a->post_debug_entry)
1025 armv7a->post_debug_entry(target);
1026
1027 return retval;
1028 }
1029
1030 static void cortex_a8_post_debug_entry(struct target *target)
1031 {
1032 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1033 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1034 int retval;
1035
1036 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1037 retval = armv7a->armv4_5_common.mrc(target, 15,
1038 0, 0, /* op1, op2 */
1039 1, 0, /* CRn, CRm */
1040 &cortex_a8->cp15_control_reg);
1041 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
1042
1043 if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
1044 {
1045 uint32_t cache_type_reg;
1046
1047 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
1048 retval = armv7a->armv4_5_common.mrc(target, 15,
1049 0, 1, /* op1, op2 */
1050 0, 0, /* CRn, CRm */
1051 &cache_type_reg);
1052 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);
1053
		/* FIXME the armv4_5 cache info DOES NOT APPLY to Cortex-A8 */
1055 armv4_5_identify_cache(cache_type_reg,
1056 &armv7a->armv4_5_mmu.armv4_5_cache);
1057 }
1058
1059 armv7a->armv4_5_mmu.mmu_enabled =
1060 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
1061 armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
1062 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
1063 armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
1064 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
1065
1066
1067 }
1068
1069 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1070 int handle_breakpoints)
1071 {
1072 struct armv7a_common *armv7a = target_to_armv7a(target);
1073 struct arm *armv4_5 = &armv7a->armv4_5_common;
1074 struct breakpoint *breakpoint = NULL;
1075 struct breakpoint stepbreakpoint;
1076 struct reg *r;
1077 int retval;
1078
1079 int timeout = 100;
1080
1081 if (target->state != TARGET_HALTED)
1082 {
1083 LOG_WARNING("target not halted");
1084 return ERROR_TARGET_NOT_HALTED;
1085 }
1086
1087 /* current = 1: continue on current pc, otherwise continue at <address> */
1088 r = armv4_5->pc;
1089 if (!current)
1090 {
1091 buf_set_u32(r->value, 0, 32, address);
1092 }
1093 else
1094 {
1095 address = buf_get_u32(r->value, 0, 32);
1096 }
1097
1098 /* The front-end may request us not to handle breakpoints.
1099 * But since Cortex-A8 uses breakpoint for single step,
1100 * we MUST handle breakpoints.
1101 */
1102 handle_breakpoints = 1;
1103 if (handle_breakpoints) {
1104 breakpoint = breakpoint_find(target, address);
1105 if (breakpoint)
1106 cortex_a8_unset_breakpoint(target, breakpoint);
1107 }
1108
1109 /* Setup single step breakpoint */
1110 stepbreakpoint.address = address;
1111 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1112 ? 2 : 4;
1113 stepbreakpoint.type = BKPT_HARD;
1114 stepbreakpoint.set = 0;
1115
1116 /* Break on IVA mismatch */
1117 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1118
1119 target->debug_reason = DBG_REASON_SINGLESTEP;
1120
1121 retval = cortex_a8_resume(target, 1, address, 0, 0);
1122 if (retval != ERROR_OK)
1123 return retval;
1124
1125 while (target->state != TARGET_HALTED)
1126 {
1127 retval = cortex_a8_poll(target);
1128 if (retval != ERROR_OK)
1129 return retval;
1130 if (--timeout == 0)
1131 {
1132 LOG_ERROR("timeout waiting for target halt");
1133 return ERROR_FAIL;
1134 }
1135 }
1136
1137 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1138 if (timeout > 0)
1139 target->debug_reason = DBG_REASON_BREAKPOINT;
1140
1141 if (breakpoint)
1142 cortex_a8_set_breakpoint(target, breakpoint, 0);
1143
	if (target->state == TARGET_HALTED)
1145 LOG_DEBUG("target stepped");
1146
1147 return ERROR_OK;
1148 }
1149
1150 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1151 {
1152 struct armv7a_common *armv7a = target_to_armv7a(target);
1153
1154 LOG_DEBUG(" ");
1155
1156 if (armv7a->pre_restore_context)
1157 armv7a->pre_restore_context(target);
1158
1159 arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1160
1161 return ERROR_OK;
1162 }
1163
1164
1165 /*
1166 * Cortex-A8 Breakpoint and watchpoint functions
1167 */
1168
1169 /* Setup hardware Breakpoint Register Pair */
1170 static int cortex_a8_set_breakpoint(struct target *target,
1171 struct breakpoint *breakpoint, uint8_t matchmode)
1172 {
1173 int retval;
1174 int brp_i=0;
1175 uint32_t control;
1176 uint8_t byte_addr_select = 0x0F;
1177 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1178 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1179 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1180
1181 if (breakpoint->set)
1182 {
1183 LOG_WARNING("breakpoint already set");
1184 return ERROR_OK;
1185 }
1186
1187 if (breakpoint->type == BKPT_HARD)
1188 {
		while ((brp_i < cortex_a8->brp_num) && brp_list[brp_i].used)
			brp_i++;
1191 if (brp_i >= cortex_a8->brp_num)
1192 {
1193 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1194 return ERROR_FAIL;
1195 }
1196 breakpoint->set = brp_i + 1;
1197 if (breakpoint->length == 2)
1198 {
1199 byte_addr_select = (3 << (breakpoint->address & 0x02));
1200 }
1201 control = ((matchmode & 0x7) << 20)
1202 | (byte_addr_select << 5)
1203 | (3 << 1) | 1;
1204 brp_list[brp_i].used = 1;
1205 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1206 brp_list[brp_i].control = control;
1207 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1208 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1209 brp_list[brp_i].value);
1210 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1211 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1212 brp_list[brp_i].control);
1213 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1214 brp_list[brp_i].control,
1215 brp_list[brp_i].value);
1216 }
1217 else if (breakpoint->type == BKPT_SOFT)
1218 {
1219 uint8_t code[4];
1220 if (breakpoint->length == 2)
1221 {
1222 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1223 }
1224 else
1225 {
1226 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1227 }
1228 retval = target->type->read_memory(target,
1229 breakpoint->address & 0xFFFFFFFE,
1230 breakpoint->length, 1,
1231 breakpoint->orig_instr);
1232 if (retval != ERROR_OK)
1233 return retval;
1234 retval = target->type->write_memory(target,
1235 breakpoint->address & 0xFFFFFFFE,
1236 breakpoint->length, 1, code);
1237 if (retval != ERROR_OK)
1238 return retval;
1239 breakpoint->set = 0x11; /* Any nice value but 0 */
1240 }
1241
1242 return ERROR_OK;
1243 }
1244
1245 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1246 {
1247 int retval;
1248 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1249 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1250 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1251
1252 if (!breakpoint->set)
1253 {
1254 LOG_WARNING("breakpoint not set");
1255 return ERROR_OK;
1256 }
1257
1258 if (breakpoint->type == BKPT_HARD)
1259 {
1260 int brp_i = breakpoint->set - 1;
1261 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1262 {
1263 LOG_DEBUG("Invalid BRP number in breakpoint");
1264 return ERROR_OK;
1265 }
1266 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1267 brp_list[brp_i].control, brp_list[brp_i].value);
1268 brp_list[brp_i].used = 0;
1269 brp_list[brp_i].value = 0;
1270 brp_list[brp_i].control = 0;
1271 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1272 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1273 brp_list[brp_i].control);
1274 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1275 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1276 brp_list[brp_i].value);
1277 }
1278 else
1279 {
1280 /* restore original instruction (kept in target endianness) */
1281 if (breakpoint->length == 4)
1282 {
1283 retval = target->type->write_memory(target,
1284 breakpoint->address & 0xFFFFFFFE,
1285 4, 1, breakpoint->orig_instr);
1286 if (retval != ERROR_OK)
1287 return retval;
1288 }
1289 else
1290 {
1291 retval = target->type->write_memory(target,
1292 breakpoint->address & 0xFFFFFFFE,
1293 2, 1, breakpoint->orig_instr);
1294 if (retval != ERROR_OK)
1295 return retval;
1296 }
1297 }
1298 breakpoint->set = 0;
1299
1300 return ERROR_OK;
1301 }
1302
1303 static int cortex_a8_add_breakpoint(struct target *target,
1304 struct breakpoint *breakpoint)
1305 {
1306 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1307
1308 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1309 {
1310 LOG_INFO("no hardware breakpoint available");
1311 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1312 }
1313
1314 if (breakpoint->type == BKPT_HARD)
1315 cortex_a8->brp_num_available--;
1316 cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1317
1318 return ERROR_OK;
1319 }
1320
1321 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1322 {
1323 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1324
1325 #if 0
1326 /* It is perfectly possible to remove breakpoints while the target is running */
1327 if (target->state != TARGET_HALTED)
1328 {
1329 LOG_WARNING("target not halted");
1330 return ERROR_TARGET_NOT_HALTED;
1331 }
1332 #endif
1333
1334 if (breakpoint->set)
1335 {
1336 cortex_a8_unset_breakpoint(target, breakpoint);
1337 if (breakpoint->type == BKPT_HARD)
1338 cortex_a8->brp_num_available++ ;
1339 }
1340
1341
1342 return ERROR_OK;
1343 }
1344
1345
1346
1347 /*
1348 * Cortex-A8 Reset functions
1349 */
1350
1351 static int cortex_a8_assert_reset(struct target *target)
1352 {
1353 struct armv7a_common *armv7a = target_to_armv7a(target);
1354
1355 LOG_DEBUG(" ");
1356
1357 /* FIXME when halt is requested, make it work somehow... */
1358
1359 /* Issue some kind of warm reset. */
1360 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1361 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1362 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1363 /* REVISIT handle "pulls" cases, if there's
1364 * hardware that needs them to work.
1365 */
1366 jtag_add_reset(0, 1);
1367 } else {
1368 LOG_ERROR("%s: how to reset?", target_name(target));
1369 return ERROR_FAIL;
1370 }
1371
1372 /* registers are now invalid */
1373 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1374
1375 target->state = TARGET_RESET;
1376
1377 return ERROR_OK;
1378 }
1379
1380 static int cortex_a8_deassert_reset(struct target *target)
1381 {
1382 int retval;
1383
1384 LOG_DEBUG(" ");
1385
1386 /* be certain SRST is off */
1387 jtag_add_reset(0, 0);
1388
1389 retval = cortex_a8_poll(target);
1390 if (retval != ERROR_OK)
1391 return retval;
1392
1393 if (target->reset_halt) {
1394 if (target->state != TARGET_HALTED) {
1395 LOG_WARNING("%s: ran after reset and before halt ...",
1396 target_name(target));
1397 if ((retval = target_halt(target)) != ERROR_OK)
1398 return retval;
1399 }
1400 }
1401
1402 return ERROR_OK;
1403 }
1404
1405 /*
1406 * Cortex-A8 Memory access
1407 *
 * This is the same as for the Cortex-M3, but we must also use the
 * correct AP number for every access.
1410 */
1411
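/* Physical address reads go straight through the memory AP, using the
 * word, halfword or byte buffer transfer that matches the element size. */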
1412 static int cortex_a8_read_phys_memory(struct target *target,
1413 uint32_t address, uint32_t size,
1414 uint32_t count, uint8_t *buffer)
1415 {
1416 struct armv7a_common *armv7a = target_to_armv7a(target);
1417 struct adiv5_dap *swjdp = &armv7a->dap;
1418 int retval = ERROR_INVALID_ARGUMENTS;
1419
1420 /* cortex_a8 handles unaligned memory access */
1421
1422 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1423 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);
1424 if (count && buffer) {
1425 switch (size) {
1426 case 4:
1427 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1428 break;
1429 case 2:
1430 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1431 break;
1432 case 1:
1433 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1434 break;
1435 }
1436 }
1437
1438 return retval;
1439 }
1440
1441 static int cortex_a8_read_memory(struct target *target, uint32_t address,
1442 uint32_t size, uint32_t count, uint8_t *buffer)
1443 {
1444 int enabled = 0;
1445 uint32_t virt, phys;
1446 int retval;
1447
1448 /* cortex_a8 handles unaligned memory access */
1449
1450 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1451 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
1452 retval = cortex_a8_mmu(target, &enabled);
1453 if (retval != ERROR_OK)
1454 return retval;
1455
	if (enabled)
	{
		virt = address;
		retval = cortex_a8_virt2phys(target, virt, &phys);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);
		address = phys;
	}
1463
1464 return cortex_a8_read_phys_memory(target, address, size, count, buffer);
1465 }
1466
1467 static int cortex_a8_write_phys_memory(struct target *target,
1468 uint32_t address, uint32_t size,
1469 uint32_t count, uint8_t *buffer)
1470 {
1471 struct armv7a_common *armv7a = target_to_armv7a(target);
1472 struct adiv5_dap *swjdp = &armv7a->dap;
1473 int retval = ERROR_INVALID_ARGUMENTS;
1474
1475 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1476
1477 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);
1478 if (count && buffer) {
1479 switch (size) {
1480 case 4:
1481 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
1482 break;
1483 case 2:
1484 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1485 break;
1486 case 1:
1487 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1488 break;
1489 }
1490 }
1491
1492 /* REVISIT this op is generic ARMv7-A/R stuff */
1493 if (retval == ERROR_OK && target->state == TARGET_HALTED)
1494 {
1495 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
1496
1497 retval = dpm->prepare(dpm);
1498 if (retval != ERROR_OK)
1499 return retval;
1500
1501 /* The Cache handling will NOT work with MMU active, the
1502 * wrong addresses will be invalidated!
1503 *
1504 * For both ICache and DCache, walk all cache lines in the
1505 * address range. Cortex-A8 has fixed 64 byte line length.
1506 *
1507 * REVISIT per ARMv7, these may trigger watchpoints ...
1508 */
1509
1510 /* invalidate I-Cache */
1511 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
1512 {
1513 /* ICIMVAU - Invalidate Cache single entry
1514 * with MVA to PoU
1515 * MCR p15, 0, r0, c7, c5, 1
1516 */
1517 for (uint32_t cacheline = address;
1518 cacheline < address + size * count;
1519 cacheline += 64) {
1520 retval = dpm->instr_write_data_r0(dpm,
1521 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1522 cacheline);
1523 }
1524 }
1525
1526 /* invalidate D-Cache */
1527 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
1528 {
1529 /* DCIMVAC - Invalidate data Cache line
1530 * with MVA to PoC
1531 * MCR p15, 0, r0, c7, c6, 1
1532 */
1533 for (uint32_t cacheline = address;
1534 cacheline < address + size * count;
1535 cacheline += 64) {
1536 retval = dpm->instr_write_data_r0(dpm,
1537 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1538 cacheline);
1539 }
1540 }
1541
1542 /* (void) */ dpm->finish(dpm);
1543 }
1544
1545 return retval;
1546 }
1547
1548 static int cortex_a8_write_memory(struct target *target, uint32_t address,
1549 uint32_t size, uint32_t count, uint8_t *buffer)
1550 {
1551 int enabled = 0;
1552 uint32_t virt, phys;
1553 int retval;
1554
1555 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1556
1557 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1558 retval = cortex_a8_mmu(target, &enabled);
1559 if (retval != ERROR_OK)
1560 return retval;
	if (enabled)
	{
		virt = address;
		retval = cortex_a8_virt2phys(target, virt, &phys);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
		address = phys;
	}
1568
1569 return cortex_a8_write_phys_memory(target, address, size,
1570 count, buffer);
1571 }
1572
1573 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
1574 uint32_t count, uint8_t *buffer)
1575 {
1576 return cortex_a8_write_memory(target, address, 4, count, buffer);
1577 }
1578
1579
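/* DCC-based target_request support is not implemented for the Cortex-A8
 * yet; the Cortex-M3 style code below is kept disabled as a reference. */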
1580 static int cortex_a8_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1581 {
1582 #if 0
1583 u16 dcrdr;
1584
1585 mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1586 *ctrl = (uint8_t)dcrdr;
1587 *value = (uint8_t)(dcrdr >> 8);
1588
1589 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1590
1591 /* write ack back to software dcc register
1592 * signify we have read data */
1593 if (dcrdr & (1 << 0))
1594 {
1595 dcrdr = 0;
1596 mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1597 }
1598 #endif
1599 return ERROR_OK;
1600 }
1601
1602
1603 static int cortex_a8_handle_target_request(void *priv)
1604 {
1605 struct target *target = priv;
1606 struct armv7a_common *armv7a = target_to_armv7a(target);
1607 struct adiv5_dap *swjdp = &armv7a->dap;
1608
1609 if (!target_was_examined(target))
1610 return ERROR_OK;
1611 if (!target->dbg_msg_enabled)
1612 return ERROR_OK;
1613
1614 if (target->state == TARGET_RUNNING)
1615 {
1616 uint8_t data = 0;
1617 uint8_t ctrl = 0;
1618
1619 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1620
1621 /* check if we have data */
1622 if (ctrl & (1 << 0))
1623 {
1624 uint32_t request;
1625
1626 /* we assume target is quick enough */
1627 request = data;
1628 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1629 request |= (data << 8);
1630 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1631 request |= (data << 16);
1632 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1633 request |= (data << 24);
1634 target_request(target, request);
1635 }
1636 }
1637
1638 return ERROR_OK;
1639 }
1640
1641 /*
1642 * Cortex-A8 target information and configuration
1643 */
1644
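/* First-time examination: initialize the DAP, read the CPUID, CTYPR,
 * TTYPR and DIDR debug registers, set up the DPM, and allocate the
 * breakpoint register pair bookkeeping based on DIDR. */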
1645 static int cortex_a8_examine_first(struct target *target)
1646 {
1647 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1648 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1649 struct adiv5_dap *swjdp = &armv7a->dap;
1650 int i;
1651 int retval = ERROR_OK;
1652 uint32_t didr, ctypr, ttypr, cpuid;
1653
1654 /* stop assuming this is an OMAP! */
1655 LOG_DEBUG("TODO - autoconfigure");
1656
1657 /* Here we shall insert a proper ROM Table scan */
1658 armv7a->debug_base = OMAP3530_DEBUG_BASE;
1659
	/* We do one extra read below to ensure the DAP is configured;
	 * ahbap_debugport_init(swjdp) is called first to set it up.
	 */
1663 retval = ahbap_debugport_init(swjdp);
1664 if (retval != ERROR_OK)
1665 return retval;
1666
1667 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1668 if (retval != ERROR_OK)
1669 return retval;
1670
1671 if ((retval = mem_ap_read_atomic_u32(swjdp,
1672 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1673 {
1674 LOG_DEBUG("Examine %s failed", "CPUID");
1675 return retval;
1676 }
1677
1678 if ((retval = mem_ap_read_atomic_u32(swjdp,
1679 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1680 {
1681 LOG_DEBUG("Examine %s failed", "CTYPR");
1682 return retval;
1683 }
1684
1685 if ((retval = mem_ap_read_atomic_u32(swjdp,
1686 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1687 {
1688 LOG_DEBUG("Examine %s failed", "TTYPR");
1689 return retval;
1690 }
1691
1692 if ((retval = mem_ap_read_atomic_u32(swjdp,
1693 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1694 {
1695 LOG_DEBUG("Examine %s failed", "DIDR");
1696 return retval;
1697 }
1698
1699 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1700 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1701 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1702 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1703
1704 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
1705 retval = cortex_a8_dpm_setup(cortex_a8, didr);
1706 if (retval != ERROR_OK)
1707 return retval;
1708
1709 /* Setup Breakpoint Register Pairs */
1710 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
1711 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1712 cortex_a8->brp_num_available = cortex_a8->brp_num;
1713 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
1714 // cortex_a8->brb_enabled = ????;
1715 for (i = 0; i < cortex_a8->brp_num; i++)
1716 {
1717 cortex_a8->brp_list[i].used = 0;
1718 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
1719 cortex_a8->brp_list[i].type = BRP_NORMAL;
1720 else
1721 cortex_a8->brp_list[i].type = BRP_CONTEXT;
1722 cortex_a8->brp_list[i].value = 0;
1723 cortex_a8->brp_list[i].control = 0;
1724 cortex_a8->brp_list[i].BRPn = i;
1725 }
1726
1727 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
1728
1729 target_set_examined(target);
1730 return ERROR_OK;
1731 }
1732
1733 static int cortex_a8_examine(struct target *target)
1734 {
1735 int retval = ERROR_OK;
1736
1737 /* don't re-probe hardware after each reset */
1738 if (!target_was_examined(target))
1739 retval = cortex_a8_examine_first(target);
1740
1741 /* Configure core debug access */
1742 if (retval == ERROR_OK)
1743 retval = cortex_a8_init_debug_access(target);
1744
1745 return retval;
1746 }
1747
1748 /*
1749 * Cortex-A8 target creation and initialization
1750 */
1751
1752 static int cortex_a8_init_target(struct command_context *cmd_ctx,
1753 struct target *target)
1754 {
1755 /* examine_first() does a bunch of this */
1756 return ERROR_OK;
1757 }
1758
1759 static int cortex_a8_init_arch_info(struct target *target,
1760 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
1761 {
1762 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1763 struct arm *armv4_5 = &armv7a->armv4_5_common;
1764 struct adiv5_dap *dap = &armv7a->dap;
1765
1766 armv7a->armv4_5_common.dap = dap;
1767
1768 /* Setup struct cortex_a8_common */
1769 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
1770 armv4_5->arch_info = armv7a;
1771
1772 /* prepare JTAG information for the new target */
1773 cortex_a8->jtag_info.tap = tap;
1774 cortex_a8->jtag_info.scann_size = 4;
1775
1776 /* Leave (only) generic DAP stuff for debugport_init() */
1777 dap->jtag_info = &cortex_a8->jtag_info;
1778 dap->memaccess_tck = 80;
1779
1780 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
1781 dap->tar_autoincr_block = (1 << 10);
1782
1783 cortex_a8->fast_reg_read = 0;
1784
1785 /* Set default value */
1786 cortex_a8->current_address_mode = ARM_MODE_ANY;
1787
1788 /* register arch-specific functions */
1789 armv7a->examine_debug_reason = NULL;
1790
1791 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
1792
1793 armv7a->pre_restore_context = NULL;
1794 armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
1795 armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
1796 armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
1797 armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
1798 armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
1799 armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
1800 armv7a->armv4_5_mmu.has_tiny_pages = 1;
1801 armv7a->armv4_5_mmu.mmu_enabled = 0;
1802
1803
1804 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
1805
1806 /* REVISIT v7a setup should be in a v7a-specific routine */
1807 arm_init_arch_info(target, armv4_5);
1808 armv7a->common_magic = ARMV7_COMMON_MAGIC;
1809
1810 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
1811
1812 return ERROR_OK;
1813 }
1814
1815 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
1816 {
1817 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
1818
1819 cortex_a8_init_arch_info(target, cortex_a8, target->tap);
1820
1821 return ERROR_OK;
1822 }
1823
1824 static uint32_t cortex_a8_get_ttb(struct target *target)
1825 {
1826 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1827 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1828 uint32_t ttb = 0, retval = ERROR_OK;
1829
1830 /* current_address_mode is set inside cortex_a8_virt2phys()
1831 where we can determine if address belongs to user or kernel */
1832 if(cortex_a8->current_address_mode == ARM_MODE_SVC)
1833 {
1834 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1835 retval = armv7a->armv4_5_common.mrc(target, 15,
1836 0, 1, /* op1, op2 */
1837 2, 0, /* CRn, CRm */
1838 &ttb);
1839 }
1840 else if(cortex_a8->current_address_mode == ARM_MODE_USR)
1841 {
1842 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1843 retval = armv7a->armv4_5_common.mrc(target, 15,
1844 0, 0, /* op1, op2 */
1845 2, 0, /* CRn, CRm */
1846 &ttb);
1847 }
	/* We don't know whether the address belongs to user or kernel space;
	   assume that in kernel (SVC) mode the address belongs to the kernel,
	   and in user mode it belongs to user space. */
1852 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
1853 {
1854 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1855 retval = armv7a->armv4_5_common.mrc(target, 15,
1856 0, 1, /* op1, op2 */
1857 2, 0, /* CRn, CRm */
1858 &ttb);
1859 }
1860 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
1861 {
1862 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1863 retval = armv7a->armv4_5_common.mrc(target, 15,
1864 0, 0, /* op1, op2 */
1865 2, 0, /* CRn, CRm */
1866 &ttb);
1867 }
	/* otherwise we don't know which TTB to use: user or kernel */
1869 else
1870 LOG_ERROR("Don't know how to get ttb for current mode!!!");
1871
1872 ttb &= 0xffffc000;
1873
1874 return ttb;
1875 }
1876
1877 static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
1878 int d_u_cache, int i_cache)
1879 {
1880 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1881 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1882 uint32_t cp15_control;
1883
1884 /* read cp15 control register */
1885 armv7a->armv4_5_common.mrc(target, 15,
1886 0, 0, /* op1, op2 */
1887 1, 0, /* CRn, CRm */
1888 &cp15_control);
1889
1890
1891 if (mmu)
1892 cp15_control &= ~0x1U;
1893
1894 if (d_u_cache)
1895 cp15_control &= ~0x4U;
1896
1897 if (i_cache)
1898 cp15_control &= ~0x1000U;
1899
1900 armv7a->armv4_5_common.mcr(target, 15,
1901 0, 0, /* op1, op2 */
1902 1, 0, /* CRn, CRm */
1903 cp15_control);
1904 }
1905
1906 static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
1907 int d_u_cache, int i_cache)
1908 {
1909 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1910 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1911 uint32_t cp15_control;
1912
1913 /* read cp15 control register */
1914 armv7a->armv4_5_common.mrc(target, 15,
1915 0, 0, /* op1, op2 */
1916 1, 0, /* CRn, CRm */
1917 &cp15_control);
1918
1919 if (mmu)
1920 cp15_control |= 0x1U;
1921
1922 if (d_u_cache)
1923 cp15_control |= 0x4U;
1924
1925 if (i_cache)
1926 cp15_control |= 0x1000U;
1927
1928 armv7a->armv4_5_common.mcr(target, 15,
1929 0, 0, /* op1, op2 */
1930 1, 0, /* CRn, CRm */
1931 cp15_control);
1932 }
1933
1934
1935 static int cortex_a8_mmu(struct target *target, int *enabled)
1936 {
1937 if (target->state != TARGET_HALTED) {
1938 LOG_ERROR("%s: target not halted", __func__);
1939 return ERROR_TARGET_INVALID;
1940 }
1941
1942 *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
1943 return ERROR_OK;
1944 }
1945
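/* Translate a virtual address by walking the MMU tables; the Linux-style
 * user/kernel split at 0xc0000000 decides (via current_address_mode)
 * whether cortex_a8_get_ttb() reads TTBR0 or TTBR1. */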
1946 static int cortex_a8_virt2phys(struct target *target,
1947 uint32_t virt, uint32_t *phys)
1948 {
1949 uint32_t cb;
1950 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1951 // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1952 struct armv7a_common *armv7a = target_to_armv7a(target);
1953
1954 /* We assume that virtual address is separated
1955 between user and kernel in Linux style:
1956 0x00000000-0xbfffffff - User space
1957 0xc0000000-0xffffffff - Kernel space */
1958 if( virt < 0xc0000000 ) /* Linux user space */
1959 cortex_a8->current_address_mode = ARM_MODE_USR;
1960 else /* Linux kernel */
1961 cortex_a8->current_address_mode = ARM_MODE_SVC;
1962 uint32_t ret;
1963 int retval = armv4_5_mmu_translate_va(target,
1964 &armv7a->armv4_5_mmu, virt, &cb, &ret);
1965 if (retval != ERROR_OK)
1966 return retval;
1967 /* Reset the flag. We don't want someone else to use it by error */
1968 cortex_a8->current_address_mode = ARM_MODE_ANY;
1969
1970 *phys = ret;
1971 return ERROR_OK;
1972 }
1973
1974 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
1975 {
1976 struct target *target = get_current_target(CMD_CTX);
1977 struct armv7a_common *armv7a = target_to_armv7a(target);
1978
1979 return armv4_5_handle_cache_info_command(CMD_CTX,
1980 &armv7a->armv4_5_mmu.armv4_5_cache);
1981 }
1982
1983
1984 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
1985 {
1986 struct target *target = get_current_target(CMD_CTX);
1987 if (!target_was_examined(target))
1988 {
1989 LOG_ERROR("target not examined yet");
1990 return ERROR_FAIL;
1991 }
1992
1993 return cortex_a8_init_debug_access(target);
1994 }
1995
1996 static const struct command_registration cortex_a8_exec_command_handlers[] = {
1997 {
1998 .name = "cache_info",
1999 .handler = cortex_a8_handle_cache_info_command,
2000 .mode = COMMAND_EXEC,
2001 .help = "display information about target caches",
2002 },
2003 {
2004 .name = "dbginit",
2005 .handler = cortex_a8_handle_dbginit_command,
2006 .mode = COMMAND_EXEC,
2007 .help = "Initialize core debug",
2008 },
2009 COMMAND_REGISTRATION_DONE
2010 };
2011 static const struct command_registration cortex_a8_command_handlers[] = {
2012 {
2013 .chain = arm_command_handlers,
2014 },
2015 {
2016 .chain = armv7a_command_handlers,
2017 },
2018 {
2019 .name = "cortex_a8",
2020 .mode = COMMAND_ANY,
2021 .help = "Cortex-A8 command group",
2022 .chain = cortex_a8_exec_command_handlers,
2023 },
2024 COMMAND_REGISTRATION_DONE
2025 };
2026
2027 struct target_type cortexa8_target = {
2028 .name = "cortex_a8",
2029
2030 .poll = cortex_a8_poll,
2031 .arch_state = armv7a_arch_state,
2032
2033 .target_request_data = NULL,
2034
2035 .halt = cortex_a8_halt,
2036 .resume = cortex_a8_resume,
2037 .step = cortex_a8_step,
2038
2039 .assert_reset = cortex_a8_assert_reset,
2040 .deassert_reset = cortex_a8_deassert_reset,
2041 .soft_reset_halt = NULL,
2042
2043 /* REVISIT allow exporting VFP3 registers ... */
2044 .get_gdb_reg_list = arm_get_gdb_reg_list,
2045
2046 .read_memory = cortex_a8_read_memory,
2047 .write_memory = cortex_a8_write_memory,
2048 .bulk_write_memory = cortex_a8_bulk_write_memory,
2049
2050 .checksum_memory = arm_checksum_memory,
2051 .blank_check_memory = arm_blank_check_memory,
2052
2053 .run_algorithm = armv4_5_run_algorithm,
2054
2055 .add_breakpoint = cortex_a8_add_breakpoint,
2056 .remove_breakpoint = cortex_a8_remove_breakpoint,
2057 .add_watchpoint = NULL,
2058 .remove_watchpoint = NULL,
2059
2060 .commands = cortex_a8_command_handlers,
2061 .target_create = cortex_a8_target_create,
2062 .init_target = cortex_a8_init_target,
2063 .examine = cortex_a8_examine,
2064
2065 .read_phys_memory = cortex_a8_read_phys_memory,
2066 .write_phys_memory = cortex_a8_write_phys_memory,
2067 .mmu = cortex_a8_mmu,
2068 .virt2phys = cortex_a8_virt2phys,
2069
2070 };
