cortex_a : optimize apb read/write access.
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * This program is free software; you can redistribute it and/or modify *
21 * it under the terms of the GNU General Public License as published by *
22 * the Free Software Foundation; either version 2 of the License, or *
23 * (at your option) any later version. *
24 * *
25 * This program is distributed in the hope that it will be useful, *
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
28 * GNU General Public License for more details. *
29 * *
30 * You should have received a copy of the GNU General Public License *
31 * along with this program; if not, write to the *
32 * Free Software Foundation, Inc., *
33 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
34 * *
35 * Cortex-A8(tm) TRM, ARM DDI 0344H *
36 * Cortex-A9(tm) TRM, ARM DDI 0407F *
37 * *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include "breakpoints.h"
45 #include "cortex_a.h"
46 #include "register.h"
47 #include "target_request.h"
48 #include "target_type.h"
49 #include "arm_opcodes.h"
50 #include <helper/time_support.h>
51
52 static int cortex_a8_poll(struct target *target);
53 static int cortex_a8_debug_entry(struct target *target);
54 static int cortex_a8_restore_context(struct target *target, bool bpwp);
55 static int cortex_a8_set_breakpoint(struct target *target,
56 struct breakpoint *breakpoint, uint8_t matchmode);
57 static int cortex_a8_set_context_breakpoint(struct target *target,
58 struct breakpoint *breakpoint, uint8_t matchmode);
59 static int cortex_a8_set_hybrid_breakpoint(struct target *target,
60 struct breakpoint *breakpoint);
61 static int cortex_a8_unset_breakpoint(struct target *target,
62 struct breakpoint *breakpoint);
63 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
64 uint32_t *value, int regnum);
65 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
66 uint32_t value, int regnum);
67 static int cortex_a8_mmu(struct target *target, int *enabled);
68 static int cortex_a8_virt2phys(struct target *target,
69 uint32_t virt, uint32_t *phys);
70 static int cortex_a8_read_apb_ab_memory(struct target *target,
71 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
72
73 /*
74 * FIXME do topology discovery using the ROM; don't
75 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
76 * cores, with different AP numbering ... don't use a #define
77 * for these numbers, use per-core armv7a state.
78 */
79 #define swjdp_memoryap 0
80 #define swjdp_debugap 1
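/* Note (assumption, matching the FIXME above): AP #0 is taken to be the AHB-AP
 * used for memory accesses and AP #1 the APB-AP that fronts the core debug
 * registers, which holds for OMAP3-class parts */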
81
82 /* restore cp15_control_reg at resume */
83 static int cortex_a8_restore_cp15_control_reg(struct target *target)
84 {
85 int retval = ERROR_OK;
86 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
87 struct armv7a_common *armv7a = target_to_armv7a(target);
88
89 if (cortex_a8->cp15_control_reg != cortex_a8->cp15_control_reg_curr) {
90 cortex_a8->cp15_control_reg_curr = cortex_a8->cp15_control_reg;
91 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg); */
92 retval = armv7a->arm.mcr(target, 15,
93 0, 0, /* op1, op2 */
94 1, 0, /* CRn, CRm */
95 cortex_a8->cp15_control_reg);
96 }
97 return retval;
98 }
99
100 /* check the address before a cortex_a8 APB read/write access with the MMU on,
101  * to avoid a predictable APB data abort */
102 static int cortex_a8_check_address(struct target *target, uint32_t address)
103 {
104 struct armv7a_common *armv7a = target_to_armv7a(target);
105 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
106 uint32_t os_border = armv7a->armv7a_mmu.os_border;
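/* os_border marks the user/kernel split of the virtual address space
 * (e.g. 0xc0000000 for a typical Linux 3G/1G split) */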
107 if ((address < os_border) &&
108 (armv7a->arm.core_mode == ARM_MODE_SVC)) {
109 LOG_ERROR("%" PRIx32 " access in userspace and target in supervisor", address);
110 return ERROR_FAIL;
111 }
112 if ((address >= os_border) &&
113 (cortex_a8->curr_mode != ARM_MODE_SVC)) {
114 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
115 cortex_a8->curr_mode = ARM_MODE_SVC;
116 LOG_INFO("%" PRIx32 " access in kernel space and target not in supervisor",
117 address);
118 return ERROR_OK;
119 }
120 if ((address < os_border) &&
121 (cortex_a8->curr_mode == ARM_MODE_SVC)) {
122 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
123 cortex_a8->curr_mode = ARM_MODE_ANY;
124 }
125 return ERROR_OK;
126 }
127 /* modify cp15_control_reg to enable or disable the MMU for:
128  * - virt2phys address conversion
129  * - reading or writing memory at a physical or virtual address */
130 static int cortex_a8_mmu_modify(struct target *target, int enable)
131 {
132 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
133 struct armv7a_common *armv7a = target_to_armv7a(target);
134 int retval = ERROR_OK;
135 if (enable) {
136 /* the MMU must have been enabled when the target stopped */
137 if (!(cortex_a8->cp15_control_reg & 0x1U)) {
138 LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
139 return ERROR_FAIL;
140 }
141 if (!(cortex_a8->cp15_control_reg_curr & 0x1U)) {
142 cortex_a8->cp15_control_reg_curr |= 0x1U;
143 retval = armv7a->arm.mcr(target, 15,
144 0, 0, /* op1, op2 */
145 1, 0, /* CRn, CRm */
146 cortex_a8->cp15_control_reg_curr);
147 }
148 } else {
149 if (cortex_a8->cp15_control_reg_curr & 0x4U) {
150 /* data cache is active */
151 cortex_a8->cp15_control_reg_curr &= ~0x4U;
152 /* flush the data cache, using the armv7 handler if one is registered */
153 if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache)
154 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache(target);
155 }
156 if ((cortex_a8->cp15_control_reg_curr & 0x1U)) {
157 cortex_a8->cp15_control_reg_curr &= ~0x1U;
158 retval = armv7a->arm.mcr(target, 15,
159 0, 0, /* op1, op2 */
160 1, 0, /* CRn, CRm */
161 cortex_a8->cp15_control_reg_curr);
162 }
163 }
164 return retval;
165 }
166
167 /*
168 * Cortex-A8 Basic debug access, very low level assumes state is saved
169 */
170 static int cortex_a8_init_debug_access(struct target *target)
171 {
172 struct armv7a_common *armv7a = target_to_armv7a(target);
173 struct adiv5_dap *swjdp = armv7a->arm.dap;
174 int retval;
175 uint32_t dummy;
176
177 LOG_DEBUG(" ");
178
179 /* Unlocking the debug registers for modification
180 * The debugport might be uninitialised so try twice */
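/* 0xC5ACCE55 is the architected CoreSight key for the Lock Access Register */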
181 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
182 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
183 if (retval != ERROR_OK) {
184 /* try again */
185 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
186 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
187 if (retval == ERROR_OK)
188 LOG_USER(
189 "Unlocking debug access failed on first try, but succeeded on second try.");
190 }
191 if (retval != ERROR_OK)
192 return retval;
193 /* Clear Sticky Power Down status Bit in PRSR to enable access to
194  * the registers in the Core Power Domain */
195 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
196 armv7a->debug_base + CPUDBG_PRSR, &dummy);
197 if (retval != ERROR_OK)
198 return retval;
199
200 /* Enabling of instruction execution in debug mode is done in debug_entry code */
201
202 /* Resync breakpoint registers */
203
204 /* Since this is likely called from init or reset, update target state information*/
205 return cortex_a8_poll(target);
206 }
207
208 /* To reduce needless round-trips, pass in a pointer to the current
209 * DSCR value. Initialize it to zero if you just need to know the
210 * value on return from this function; or DSCR_INSTR_COMP if you
211 * happen to know that no instruction is pending.
212 */
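/* Illustrative call pattern (not part of the original source):
 *
 *	uint32_t dscr = 0;
 *	retval = cortex_a8_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), &dscr);
 *
 * On return dscr caches the last DSCR value read, so a subsequent call that
 * passes the same pointer can skip the initial "wait for InstrCompl" access.
 */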
213 static int cortex_a8_exec_opcode(struct target *target,
214 uint32_t opcode, uint32_t *dscr_p)
215 {
216 uint32_t dscr;
217 int retval;
218 struct armv7a_common *armv7a = target_to_armv7a(target);
219 struct adiv5_dap *swjdp = armv7a->arm.dap;
220
221 dscr = dscr_p ? *dscr_p : 0;
222
223 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
224
225 /* Wait for InstrCompl bit to be set */
226 long long then = timeval_ms();
227 while ((dscr & DSCR_INSTR_COMP) == 0) {
228 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
229 armv7a->debug_base + CPUDBG_DSCR, &dscr);
230 if (retval != ERROR_OK) {
231 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
232 return retval;
233 }
234 if (timeval_ms() > then + 1000) {
235 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
236 return ERROR_FAIL;
237 }
238 }
239
240 retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
241 armv7a->debug_base + CPUDBG_ITR, opcode);
242 if (retval != ERROR_OK)
243 return retval;
244
245 then = timeval_ms();
246 do {
247 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
248 armv7a->debug_base + CPUDBG_DSCR, &dscr);
249 if (retval != ERROR_OK) {
250 LOG_ERROR("Could not read DSCR register");
251 return retval;
252 }
253 if (timeval_ms() > then + 1000) {
254 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
255 return ERROR_FAIL;
256 }
257 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
258
259 if (dscr_p)
260 *dscr_p = dscr;
261
262 return retval;
263 }
264
265 /**************************************************************************
266 Read core register with very few exec_opcode, fast but needs work_area.
267 This can cause problems with MMU active.
268 **************************************************************************/
269 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
270 uint32_t *regfile)
271 {
272 int retval = ERROR_OK;
273 struct armv7a_common *armv7a = target_to_armv7a(target);
274 struct adiv5_dap *swjdp = armv7a->arm.dap;
275
276 retval = cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
277 if (retval != ERROR_OK)
278 return retval;
279 retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
280 if (retval != ERROR_OK)
281 return retval;
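/* register list 0xFFFE = r1-r15: store every register except r0 (which holds
 * the work-area base address) to memory in a single burst */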
282 retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
283 if (retval != ERROR_OK)
284 return retval;
285
286 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
287 (uint8_t *)(&regfile[1]), 4*15, address);
288
289 return retval;
290 }
291
292 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
293 uint32_t *value, int regnum)
294 {
295 int retval = ERROR_OK;
296 uint8_t reg = regnum&0xFF;
297 uint32_t dscr = 0;
298 struct armv7a_common *armv7a = target_to_armv7a(target);
299 struct adiv5_dap *swjdp = armv7a->arm.dap;
300
301 if (reg > 17)
302 return retval;
303
304 if (reg < 15) {
305 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
306 retval = cortex_a8_exec_opcode(target,
307 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
308 &dscr);
309 if (retval != ERROR_OK)
310 return retval;
311 } else if (reg == 15) {
312 /* "MOV r0, r15"; then move r0 to DCCTX */
313 retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
314 if (retval != ERROR_OK)
315 return retval;
316 retval = cortex_a8_exec_opcode(target,
317 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
318 &dscr);
319 if (retval != ERROR_OK)
320 return retval;
321 } else {
322 /* "MRS r0, CPSR" or "MRS r0, SPSR"
323 * then move r0 to DCCTX
324 */
325 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
326 if (retval != ERROR_OK)
327 return retval;
328 retval = cortex_a8_exec_opcode(target,
329 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
330 &dscr);
331 if (retval != ERROR_OK)
332 return retval;
333 }
334
335 /* Wait for DTRTXfull, then read DTRTX */
336 long long then = timeval_ms();
337 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
338 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
339 armv7a->debug_base + CPUDBG_DSCR, &dscr);
340 if (retval != ERROR_OK)
341 return retval;
342 if (timeval_ms() > then + 1000) {
343 LOG_ERROR("Timeout waiting for DTRTXfull");
344 return ERROR_FAIL;
345 }
346 }
347
348 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
349 armv7a->debug_base + CPUDBG_DTRTX, value);
350 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
351
352 return retval;
353 }
354
355 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
356 uint32_t value, int regnum)
357 {
358 int retval = ERROR_OK;
359 uint8_t Rd = regnum&0xFF;
360 uint32_t dscr;
361 struct armv7a_common *armv7a = target_to_armv7a(target);
362 struct adiv5_dap *swjdp = armv7a->arm.dap;
363
364 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
365
366 /* Check that DCCRX is not full */
367 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
368 armv7a->debug_base + CPUDBG_DSCR, &dscr);
369 if (retval != ERROR_OK)
370 return retval;
371 if (dscr & DSCR_DTR_RX_FULL) {
372 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
373 /* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
374 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
375 &dscr);
376 if (retval != ERROR_OK)
377 return retval;
378 }
379
380 if (Rd > 17)
381 return retval;
382
383 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
384 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
385 retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
386 armv7a->debug_base + CPUDBG_DTRRX, value);
387 if (retval != ERROR_OK)
388 return retval;
389
390 if (Rd < 15) {
391 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
392 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
393 &dscr);
394
395 if (retval != ERROR_OK)
396 return retval;
397 } else if (Rd == 15) {
398 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
399 * then "mov r15, r0"
400 */
401 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
402 &dscr);
403 if (retval != ERROR_OK)
404 return retval;
405 retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
406 if (retval != ERROR_OK)
407 return retval;
408 } else {
409 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
410 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
411 */
412 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
413 &dscr);
414 if (retval != ERROR_OK)
415 return retval;
416 retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
417 &dscr);
418 if (retval != ERROR_OK)
419 return retval;
420
421 /* "Prefetch flush" after modifying execution status in CPSR */
422 if (Rd == 16) {
423 retval = cortex_a8_exec_opcode(target,
424 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
425 &dscr);
426 if (retval != ERROR_OK)
427 return retval;
428 }
429 }
430
431 return retval;
432 }
433
434 /* Write to memory mapped registers directly with no cache or mmu handling */
435 static int cortex_a8_dap_write_memap_register_u32(struct target *target,
436 uint32_t address,
437 uint32_t value)
438 {
439 int retval;
440 struct armv7a_common *armv7a = target_to_armv7a(target);
441 struct adiv5_dap *swjdp = armv7a->arm.dap;
442
443 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap, address, value);
444
445 return retval;
446 }
447
448 /*
449 * Cortex-A8 implementation of Debug Programmer's Model
450 *
451 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
452 * so there's no need to poll for it before executing an instruction.
453 *
454 * NOTE that in several of these cases the "stall" mode might be useful.
455 * It'd let us queue a few operations together... prepare/finish might
456 * be the places to enable/disable that mode.
457 */
458
459 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
460 {
461 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
462 }
463
464 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
465 {
466 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
467 return mem_ap_sel_write_u32(a8->armv7a_common.arm.dap,
468 swjdp_debugap, a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
469 }
470
471 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
472 uint32_t *dscr_p)
473 {
474 struct adiv5_dap *swjdp = a8->armv7a_common.arm.dap;
475 uint32_t dscr = DSCR_INSTR_COMP;
476 int retval;
477
478 if (dscr_p)
479 dscr = *dscr_p;
480
481 /* Wait for DTRTXfull */
482 long long then = timeval_ms();
483 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
484 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
485 a8->armv7a_common.debug_base + CPUDBG_DSCR,
486 &dscr);
487 if (retval != ERROR_OK)
488 return retval;
489 if (timeval_ms() > then + 1000) {
490 LOG_ERROR("Timeout waiting for read dcc");
491 return ERROR_FAIL;
492 }
493 }
494
495 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
496 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
497 if (retval != ERROR_OK)
498 return retval;
499 /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
500
501 if (dscr_p)
502 *dscr_p = dscr;
503
504 return retval;
505 }
506
507 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
508 {
509 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
510 struct adiv5_dap *swjdp = a8->armv7a_common.arm.dap;
511 uint32_t dscr;
512 int retval;
513
514 /* set up invariant: INSTR_COMP is set after every DPM operation */
515 long long then = timeval_ms();
516 for (;; ) {
517 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
518 a8->armv7a_common.debug_base + CPUDBG_DSCR,
519 &dscr);
520 if (retval != ERROR_OK)
521 return retval;
522 if ((dscr & DSCR_INSTR_COMP) != 0)
523 break;
524 if (timeval_ms() > then + 1000) {
525 LOG_ERROR("Timeout waiting for dpm prepare");
526 return ERROR_FAIL;
527 }
528 }
529
530 /* this "should never happen" ... */
531 if (dscr & DSCR_DTR_RX_FULL) {
532 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
533 /* Clear DCCRX */
534 retval = cortex_a8_exec_opcode(
535 a8->armv7a_common.arm.target,
536 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
537 &dscr);
538 if (retval != ERROR_OK)
539 return retval;
540 }
541
542 return retval;
543 }
544
545 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
546 {
547 /* REVISIT what could be done here? */
548 return ERROR_OK;
549 }
550
551 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
552 uint32_t opcode, uint32_t data)
553 {
554 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
555 int retval;
556 uint32_t dscr = DSCR_INSTR_COMP;
557
558 retval = cortex_a8_write_dcc(a8, data);
559 if (retval != ERROR_OK)
560 return retval;
561
562 return cortex_a8_exec_opcode(
563 a8->armv7a_common.arm.target,
564 opcode,
565 &dscr);
566 }
567
568 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
569 uint32_t opcode, uint32_t data)
570 {
571 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
572 uint32_t dscr = DSCR_INSTR_COMP;
573 int retval;
574
575 retval = cortex_a8_write_dcc(a8, data);
576 if (retval != ERROR_OK)
577 return retval;
578
579 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
580 retval = cortex_a8_exec_opcode(
581 a8->armv7a_common.arm.target,
582 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
583 &dscr);
584 if (retval != ERROR_OK)
585 return retval;
586
587 /* then the opcode, taking data from R0 */
588 retval = cortex_a8_exec_opcode(
589 a8->armv7a_common.arm.target,
590 opcode,
591 &dscr);
592
593 return retval;
594 }
595
596 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
597 {
598 struct target *target = dpm->arm->target;
599 uint32_t dscr = DSCR_INSTR_COMP;
600
601 /* "Prefetch flush" after modifying execution status in CPSR */
602 return cortex_a8_exec_opcode(target,
603 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
604 &dscr);
605 }
606
607 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
608 uint32_t opcode, uint32_t *data)
609 {
610 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
611 int retval;
612 uint32_t dscr = DSCR_INSTR_COMP;
613
614 /* the opcode, writing data to DCC */
615 retval = cortex_a8_exec_opcode(
616 a8->armv7a_common.arm.target,
617 opcode,
618 &dscr);
619 if (retval != ERROR_OK)
620 return retval;
621
622 return cortex_a8_read_dcc(a8, data, &dscr);
623 }
624
625
626 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
627 uint32_t opcode, uint32_t *data)
628 {
629 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
630 uint32_t dscr = DSCR_INSTR_COMP;
631 int retval;
632
633 /* the opcode, writing data to R0 */
634 retval = cortex_a8_exec_opcode(
635 a8->armv7a_common.arm.target,
636 opcode,
637 &dscr);
638 if (retval != ERROR_OK)
639 return retval;
640
641 /* write R0 to DCC */
642 retval = cortex_a8_exec_opcode(
643 a8->armv7a_common.arm.target,
644 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
645 &dscr);
646 if (retval != ERROR_OK)
647 return retval;
648
649 return cortex_a8_read_dcc(a8, data, &dscr);
650 }
651
652 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
653 uint32_t addr, uint32_t control)
654 {
655 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
656 uint32_t vr = a8->armv7a_common.debug_base;
657 uint32_t cr = a8->armv7a_common.debug_base;
658 int retval;
659
660 switch (index_t) {
661 case 0 ... 15: /* breakpoints */
662 vr += CPUDBG_BVR_BASE;
663 cr += CPUDBG_BCR_BASE;
664 break;
665 case 16 ... 31: /* watchpoints */
666 vr += CPUDBG_WVR_BASE;
667 cr += CPUDBG_WCR_BASE;
668 index_t -= 16;
669 break;
670 default:
671 return ERROR_FAIL;
672 }
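/* each BRP/WRP occupies one word in its value/control register array,
 * so entry n lives at base + 4 * n */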
673 vr += 4 * index_t;
674 cr += 4 * index_t;
675
676 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
677 (unsigned) vr, (unsigned) cr);
678
679 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
680 vr, addr);
681 if (retval != ERROR_OK)
682 return retval;
683 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
684 cr, control);
685 return retval;
686 }
687
688 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
689 {
690 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
691 uint32_t cr;
692
693 switch (index_t) {
694 case 0 ... 15:
695 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
696 break;
697 case 16 ... 31:
698 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
699 index_t -= 16;
700 break;
701 default:
702 return ERROR_FAIL;
703 }
704 cr += 4 * index_t;
705
706 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
707
708 /* clear control register */
709 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
710 }
711
712 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
713 {
714 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
715 int retval;
716
717 dpm->arm = &a8->armv7a_common.arm;
718 dpm->didr = didr;
719
720 dpm->prepare = cortex_a8_dpm_prepare;
721 dpm->finish = cortex_a8_dpm_finish;
722
723 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
724 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
725 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
726
727 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
728 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
729
730 dpm->bpwp_enable = cortex_a8_bpwp_enable;
731 dpm->bpwp_disable = cortex_a8_bpwp_disable;
732
733 retval = arm_dpm_setup(dpm);
734 if (retval == ERROR_OK)
735 retval = arm_dpm_initialize(dpm);
736
737 return retval;
738 }
739 static struct target *get_cortex_a8(struct target *target, int32_t coreid)
740 {
741 struct target_list *head;
742 struct target *curr;
743
744 head = target->head;
745 while (head != (struct target_list *)NULL) {
746 curr = head->target;
747 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
748 return curr;
749 head = head->next;
750 }
751 return target;
752 }
753 static int cortex_a8_halt(struct target *target);
754
755 static int cortex_a8_halt_smp(struct target *target)
756 {
757 int retval = 0;
758 struct target_list *head;
759 struct target *curr;
760 head = target->head;
761 while (head != (struct target_list *)NULL) {
762 curr = head->target;
763 if ((curr != target) && (curr->state != TARGET_HALTED))
764 retval += cortex_a8_halt(curr);
765 head = head->next;
766 }
767 return retval;
768 }
769
770 static int update_halt_gdb(struct target *target)
771 {
772 int retval = 0;
773 if (target->gdb_service->core[0] == -1) {
774 target->gdb_service->target = target;
775 target->gdb_service->core[0] = target->coreid;
776 retval += cortex_a8_halt_smp(target);
777 }
778 return retval;
779 }
780
781 /*
782 * Cortex-A8 Run control
783 */
784
785 static int cortex_a8_poll(struct target *target)
786 {
787 int retval = ERROR_OK;
788 uint32_t dscr;
789 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
790 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
791 struct adiv5_dap *swjdp = armv7a->arm.dap;
792 enum target_state prev_target_state = target->state;
793 /* toggling to another core is done by gdb as follows: */
794 /* maint packet J core_id */
795 /* continue */
796 /* the next poll triggers a halt event sent to gdb */
797 if ((target->state == TARGET_HALTED) && (target->smp) &&
798 (target->gdb_service) &&
799 (target->gdb_service->target == NULL)) {
800 target->gdb_service->target =
801 get_cortex_a8(target, target->gdb_service->core[1]);
802 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
803 return retval;
804 }
805 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
806 armv7a->debug_base + CPUDBG_DSCR, &dscr);
807 if (retval != ERROR_OK)
808 return retval;
809 cortex_a8->cpudbg_dscr = dscr;
810
811 if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
812 if (prev_target_state != TARGET_HALTED) {
813 /* We have a halting debug event */
814 LOG_DEBUG("Target halted");
815 target->state = TARGET_HALTED;
816 if ((prev_target_state == TARGET_RUNNING)
817 || (prev_target_state == TARGET_UNKNOWN)
818 || (prev_target_state == TARGET_RESET)) {
819 retval = cortex_a8_debug_entry(target);
820 if (retval != ERROR_OK)
821 return retval;
822 if (target->smp) {
823 retval = update_halt_gdb(target);
824 if (retval != ERROR_OK)
825 return retval;
826 }
827 target_call_event_callbacks(target,
828 TARGET_EVENT_HALTED);
829 }
830 if (prev_target_state == TARGET_DEBUG_RUNNING) {
831 LOG_DEBUG(" ");
832
833 retval = cortex_a8_debug_entry(target);
834 if (retval != ERROR_OK)
835 return retval;
836 if (target->smp) {
837 retval = update_halt_gdb(target);
838 if (retval != ERROR_OK)
839 return retval;
840 }
841
842 target_call_event_callbacks(target,
843 TARGET_EVENT_DEBUG_HALTED);
844 }
845 }
846 } else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
847 target->state = TARGET_RUNNING;
848 else {
849 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
850 target->state = TARGET_UNKNOWN;
851 }
852
853 return retval;
854 }
855
856 static int cortex_a8_halt(struct target *target)
857 {
858 int retval = ERROR_OK;
859 uint32_t dscr;
860 struct armv7a_common *armv7a = target_to_armv7a(target);
861 struct adiv5_dap *swjdp = armv7a->arm.dap;
862
863 /*
864 * Tell the core to be halted by writing DRCR with 0x1
865 * and then wait for the core to be halted.
866 */
867 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
868 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
869 if (retval != ERROR_OK)
870 return retval;
871
872 /*
873 * enter halting debug mode
874 */
875 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
876 armv7a->debug_base + CPUDBG_DSCR, &dscr);
877 if (retval != ERROR_OK)
878 return retval;
879
880 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
881 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
882 if (retval != ERROR_OK)
883 return retval;
884
885 long long then = timeval_ms();
886 for (;; ) {
887 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
888 armv7a->debug_base + CPUDBG_DSCR, &dscr);
889 if (retval != ERROR_OK)
890 return retval;
891 if ((dscr & DSCR_CORE_HALTED) != 0)
892 break;
893 if (timeval_ms() > then + 1000) {
894 LOG_ERROR("Timeout waiting for halt");
895 return ERROR_FAIL;
896 }
897 }
898
899 target->debug_reason = DBG_REASON_DBGRQ;
900
901 return ERROR_OK;
902 }
903
904 static int cortex_a8_internal_restore(struct target *target, int current,
905 uint32_t *address, int handle_breakpoints, int debug_execution)
906 {
907 struct armv7a_common *armv7a = target_to_armv7a(target);
908 struct arm *arm = &armv7a->arm;
909 int retval;
910 uint32_t resume_pc;
911
912 if (!debug_execution)
913 target_free_all_working_areas(target);
914
915 #if 0
916 if (debug_execution) {
917 /* Disable interrupts */
918 /* We disable interrupts in the PRIMASK register instead of
919 * masking with C_MASKINTS,
920 * This is probably the same issue as Cortex-M3 Errata 377493:
921 * C_MASKINTS in parallel with disabled interrupts can cause
922 * local faults to not be taken. */
923 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
924 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
925 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
926
927 /* Make sure we are in Thumb mode */
928 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
929 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
930 32) | (1 << 24));
931 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
932 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
933 }
934 #endif
935
936 /* current = 1: continue on current pc, otherwise continue at <address> */
937 resume_pc = buf_get_u32(arm->pc->value, 0, 32);
938 if (!current)
939 resume_pc = *address;
940 else
941 *address = resume_pc;
942
943 /* Make sure that the ARMv7 gdb thumb fixups do not
944 * kill the return address
945 */
946 switch (arm->core_state) {
947 case ARM_STATE_ARM:
948 resume_pc &= 0xFFFFFFFC;
949 break;
950 case ARM_STATE_THUMB:
951 case ARM_STATE_THUMB_EE:
952 /* When the return address is loaded into PC
953 * bit 0 must be 1 to stay in Thumb state
954 */
955 resume_pc |= 0x1;
956 break;
957 case ARM_STATE_JAZELLE:
958 LOG_ERROR("How do I resume into Jazelle state??");
959 return ERROR_FAIL;
960 }
961 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
962 buf_set_u32(arm->pc->value, 0, 32, resume_pc);
963 arm->pc->dirty = 1;
964 arm->pc->valid = 1;
965 /* switch the DPM back to the mode saved at halt before resuming */
966 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
967 /* call this now, before restoring context, because it uses cpu
968 * register r0 to restore the cp15 control register */
969 retval = cortex_a8_restore_cp15_control_reg(target);
970 if (retval != ERROR_OK)
971 return retval;
972 retval = cortex_a8_restore_context(target, handle_breakpoints);
973 if (retval != ERROR_OK)
974 return retval;
975 target->debug_reason = DBG_REASON_NOTHALTED;
976 target->state = TARGET_RUNNING;
977
978 /* registers are now invalid */
979 register_cache_invalidate(arm->core_cache);
980
981 #if 0
982 /* the front-end may request us not to handle breakpoints */
983 if (handle_breakpoints) {
984 /* Single step past breakpoint at current address */
985 breakpoint = breakpoint_find(target, resume_pc);
986 if (breakpoint) {
987 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
988 cortex_m3_unset_breakpoint(target, breakpoint);
989 cortex_m3_single_step_core(target);
990 cortex_m3_set_breakpoint(target, breakpoint);
991 }
992 }
993
994 #endif
995 return retval;
996 }
997
998 static int cortex_a8_internal_restart(struct target *target)
999 {
1000 struct armv7a_common *armv7a = target_to_armv7a(target);
1001 struct arm *arm = &armv7a->arm;
1002 struct adiv5_dap *swjdp = arm->dap;
1003 int retval;
1004 uint32_t dscr;
1005 /*
1006 * Restart core and wait for it to be started. Clear ITRen and sticky
1007 * exception flags: see ARMv7 ARM, C5.9.
1008 *
1009 * REVISIT: for single stepping, we probably want to
1010 * disable IRQs by default, with optional override...
1011 */
1012
1013 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1014 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1015 if (retval != ERROR_OK)
1016 return retval;
1017
1018 if ((dscr & DSCR_INSTR_COMP) == 0)
1019 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
1020
1021 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
1022 armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
1023 if (retval != ERROR_OK)
1024 return retval;
1025
1026 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
1027 armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
1028 DRCR_CLEAR_EXCEPTIONS);
1029 if (retval != ERROR_OK)
1030 return retval;
1031
1032 long long then = timeval_ms();
1033 for (;; ) {
1034 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1035 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1036 if (retval != ERROR_OK)
1037 return retval;
1038 if ((dscr & DSCR_CORE_RESTARTED) != 0)
1039 break;
1040 if (timeval_ms() > then + 1000) {
1041 LOG_ERROR("Timeout waiting for resume");
1042 return ERROR_FAIL;
1043 }
1044 }
1045
1046 target->debug_reason = DBG_REASON_NOTHALTED;
1047 target->state = TARGET_RUNNING;
1048
1049 /* registers are now invalid */
1050 register_cache_invalidate(arm->core_cache);
1051
1052 return ERROR_OK;
1053 }
1054
1055 static int cortex_a8_restore_smp(struct target *target, int handle_breakpoints)
1056 {
1057 int retval = 0;
1058 struct target_list *head;
1059 struct target *curr;
1060 uint32_t address;
1061 head = target->head;
1062 while (head != (struct target_list *)NULL) {
1063 curr = head->target;
1064 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1065 /* resume at current address, not in step mode */
1066 retval += cortex_a8_internal_restore(curr, 1, &address,
1067 handle_breakpoints, 0);
1068 retval += cortex_a8_internal_restart(curr);
1069 }
1070 head = head->next;
1071
1072 }
1073 return retval;
1074 }
1075
1076 static int cortex_a8_resume(struct target *target, int current,
1077 uint32_t address, int handle_breakpoints, int debug_execution)
1078 {
1079 int retval = 0;
1080 /* dummy resume for smp toggle in order to reduce gdb impact */
1081 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1082 /* simulate a start and halt of target */
1083 target->gdb_service->target = NULL;
1084 target->gdb_service->core[0] = target->gdb_service->core[1];
1085 /* fake resume: at the next poll we hand gdb the target for core[1], see poll */
1086 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1087 return 0;
1088 }
1089 cortex_a8_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1090 if (target->smp) {
1091 target->gdb_service->core[0] = -1;
1092 retval = cortex_a8_restore_smp(target, handle_breakpoints);
1093 if (retval != ERROR_OK)
1094 return retval;
1095 }
1096 cortex_a8_internal_restart(target);
1097
1098 if (!debug_execution) {
1099 target->state = TARGET_RUNNING;
1100 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1101 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1102 } else {
1103 target->state = TARGET_DEBUG_RUNNING;
1104 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1105 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1106 }
1107
1108 return ERROR_OK;
1109 }
1110
1111 static int cortex_a8_debug_entry(struct target *target)
1112 {
1113 int i;
1114 uint32_t regfile[16], cpsr, dscr;
1115 int retval = ERROR_OK;
1116 struct working_area *regfile_working_area = NULL;
1117 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1118 struct armv7a_common *armv7a = target_to_armv7a(target);
1119 struct arm *arm = &armv7a->arm;
1120 struct adiv5_dap *swjdp = armv7a->arm.dap;
1121 struct reg *reg;
1122
1123 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
1124
1125 /* REVISIT surely we should not re-read DSCR !! */
1126 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1127 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1128 if (retval != ERROR_OK)
1129 return retval;
1130
1131 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1132 * imprecise data aborts get discarded by issuing a Data
1133 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1134 */
1135
1136 /* Enable the ITR execution once we are in debug mode */
1137 dscr |= DSCR_ITR_EN;
1138 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
1139 armv7a->debug_base + CPUDBG_DSCR, dscr);
1140 if (retval != ERROR_OK)
1141 return retval;
1142
1143 /* Examine debug reason */
1144 arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);
1145
1146 /* save address of instruction that triggered the watchpoint? */
1147 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1148 uint32_t wfar;
1149
1150 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1151 armv7a->debug_base + CPUDBG_WFAR,
1152 &wfar);
1153 if (retval != ERROR_OK)
1154 return retval;
1155 arm_dpm_report_wfar(&armv7a->dpm, wfar);
1156 }
1157
1158 /* REVISIT fast_reg_read is never set ... */
1159
1160 /* Examine target state and mode */
1161 if (cortex_a8->fast_reg_read)
1162 target_alloc_working_area(target, 64, &regfile_working_area);
1163
1164 /* First load the registers accessible through the core debug port */
1165 if (!regfile_working_area)
1166 retval = arm_dpm_read_current_registers(&armv7a->dpm);
1167 else {
1168 retval = cortex_a8_read_regs_through_mem(target,
1169 regfile_working_area->address, regfile);
1170
1171 target_free_working_area(target, regfile_working_area);
1172 if (retval != ERROR_OK)
1173 return retval;
1174
1175 /* read Current PSR */
1176 retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
1177 /* store current cpsr */
1178 if (retval != ERROR_OK)
1179 return retval;
1180
1181 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
1182
1183 arm_set_cpsr(arm, cpsr);
1184
1185 /* update cache */
1186 for (i = 0; i <= ARM_PC; i++) {
1187 reg = arm_reg_current(arm, i);
1188
1189 buf_set_u32(reg->value, 0, 32, regfile[i]);
1190 reg->valid = 1;
1191 reg->dirty = 0;
1192 }
1193
1194 /* Fixup PC Resume Address */
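/* the PC value read out this way includes the usual pipeline offset
 * (plus 8 in ARM state, plus 4 in Thumb/ThumbEE), so subtract it to
 * obtain the address of the instruction to resume at */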
1195 if (cpsr & (1 << 5)) {
1196 /* T bit set for Thumb or ThumbEE state */
1197 regfile[ARM_PC] -= 4;
1198 } else {
1199 /* ARM state */
1200 regfile[ARM_PC] -= 8;
1201 }
1202
1203 reg = arm->pc;
1204 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
1205 reg->dirty = reg->valid;
1206 }
1207
1208 #if 0
1209 /* TODO, Move this */
1210 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1211 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1212 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1213
1214 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1215 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1216
1217 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1218 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1219 #endif
1220
1221 /* Are we in an exception handler */
1222 /* armv4_5->exception_number = 0; */
1223 if (armv7a->post_debug_entry) {
1224 retval = armv7a->post_debug_entry(target);
1225 if (retval != ERROR_OK)
1226 return retval;
1227 }
1228
1229 return retval;
1230 }
1231
1232 static int cortex_a8_post_debug_entry(struct target *target)
1233 {
1234 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1235 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1236 int retval;
1237
1238 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1239 retval = armv7a->arm.mrc(target, 15,
1240 0, 0, /* op1, op2 */
1241 1, 0, /* CRn, CRm */
1242 &cortex_a8->cp15_control_reg);
1243 if (retval != ERROR_OK)
1244 return retval;
1245 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
1246 cortex_a8->cp15_control_reg_curr = cortex_a8->cp15_control_reg;
1247
1248 if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1)
1249 armv7a_identify_cache(target);
1250
1251 armv7a->armv7a_mmu.mmu_enabled =
1252 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
1253 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1254 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
1255 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1256 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
1257 cortex_a8->curr_mode = armv7a->arm.core_mode;
1258
1259 return ERROR_OK;
1260 }
1261
1262 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1263 int handle_breakpoints)
1264 {
1265 struct armv7a_common *armv7a = target_to_armv7a(target);
1266 struct arm *arm = &armv7a->arm;
1267 struct breakpoint *breakpoint = NULL;
1268 struct breakpoint stepbreakpoint;
1269 struct reg *r;
1270 int retval;
1271
1272 if (target->state != TARGET_HALTED) {
1273 LOG_WARNING("target not halted");
1274 return ERROR_TARGET_NOT_HALTED;
1275 }
1276
1277 /* current = 1: continue on current pc, otherwise continue at <address> */
1278 r = arm->pc;
1279 if (!current)
1280 buf_set_u32(r->value, 0, 32, address);
1281 else
1282 address = buf_get_u32(r->value, 0, 32);
1283
1284 /* The front-end may request us not to handle breakpoints.
1285 * But since the Cortex-A8 uses a breakpoint for single stepping,
1286 * we MUST handle breakpoints.
1287 */
1288 handle_breakpoints = 1;
1289 if (handle_breakpoints) {
1290 breakpoint = breakpoint_find(target, address);
1291 if (breakpoint)
1292 cortex_a8_unset_breakpoint(target, breakpoint);
1293 }
1294
1295 /* Setup single step breakpoint */
1296 stepbreakpoint.address = address;
1297 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1298 ? 2 : 4;
1299 stepbreakpoint.type = BKPT_HARD;
1300 stepbreakpoint.set = 0;
1301
1302 /* Break on IVA mismatch */
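/* matchmode 0x04 = address mismatch: the core halts on the first instruction
 * fetched from any address other than the one programmed, which is how
 * single step is implemented on ARMv7-A debug hardware */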
1303 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1304
1305 target->debug_reason = DBG_REASON_SINGLESTEP;
1306
1307 retval = cortex_a8_resume(target, 1, address, 0, 0);
1308 if (retval != ERROR_OK)
1309 return retval;
1310
1311 long long then = timeval_ms();
1312 while (target->state != TARGET_HALTED) {
1313 retval = cortex_a8_poll(target);
1314 if (retval != ERROR_OK)
1315 return retval;
1316 if (timeval_ms() > then + 1000) {
1317 LOG_ERROR("timeout waiting for target halt");
1318 return ERROR_FAIL;
1319 }
1320 }
1321
1322 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1323
1324 target->debug_reason = DBG_REASON_BREAKPOINT;
1325
1326 if (breakpoint)
1327 cortex_a8_set_breakpoint(target, breakpoint, 0);
1328
1329 if (target->state == TARGET_HALTED)
1330 LOG_DEBUG("target stepped");
1331
1332 return ERROR_OK;
1333 }
1334
1335 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1336 {
1337 struct armv7a_common *armv7a = target_to_armv7a(target);
1338
1339 LOG_DEBUG(" ");
1340
1341 if (armv7a->pre_restore_context)
1342 armv7a->pre_restore_context(target);
1343
1344 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1345 }
1346
1347 /*
1348 * Cortex-A8 Breakpoint and watchpoint functions
1349 */
1350
1351 /* Setup hardware Breakpoint Register Pair */
1352 static int cortex_a8_set_breakpoint(struct target *target,
1353 struct breakpoint *breakpoint, uint8_t matchmode)
1354 {
1355 int retval;
1356 int brp_i = 0;
1357 uint32_t control;
1358 uint8_t byte_addr_select = 0x0F;
1359 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1360 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1361 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1362
1363 if (breakpoint->set) {
1364 LOG_WARNING("breakpoint already set");
1365 return ERROR_OK;
1366 }
1367
1368 if (breakpoint->type == BKPT_HARD) {
1369 while ((brp_i < cortex_a8->brp_num) && brp_list[brp_i].used)
1370 brp_i++;
1371 if (brp_i >= cortex_a8->brp_num) {
1372 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1373 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1374 }
1375 breakpoint->set = brp_i + 1;
1376 if (breakpoint->length == 2)
1377 byte_addr_select = (3 << (breakpoint->address & 0x02));
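/* BCR layout: [22:20] matchmode, [8:5] byte address select,
 * [2:1] supervisor+user access, [0] breakpoint enable */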
1378 control = ((matchmode & 0x7) << 20)
1379 | (byte_addr_select << 5)
1380 | (3 << 1) | 1;
1381 brp_list[brp_i].used = 1;
1382 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1383 brp_list[brp_i].control = control;
1384 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1385 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1386 brp_list[brp_i].value);
1387 if (retval != ERROR_OK)
1388 return retval;
1389 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1390 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1391 brp_list[brp_i].control);
1392 if (retval != ERROR_OK)
1393 return retval;
1394 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1395 brp_list[brp_i].control,
1396 brp_list[brp_i].value);
1397 } else if (breakpoint->type == BKPT_SOFT) {
1398 uint8_t code[4];
1399 if (breakpoint->length == 2)
1400 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1401 else
1402 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1403 retval = target_read_memory(target,
1404 breakpoint->address & 0xFFFFFFFE,
1405 breakpoint->length, 1,
1406 breakpoint->orig_instr);
1407 if (retval != ERROR_OK)
1408 return retval;
1409 retval = target_write_memory(target,
1410 breakpoint->address & 0xFFFFFFFE,
1411 breakpoint->length, 1, code);
1412 if (retval != ERROR_OK)
1413 return retval;
1414 breakpoint->set = 0x11; /* Any nice value but 0 */
1415 }
1416
1417 return ERROR_OK;
1418 }
1419
1420 static int cortex_a8_set_context_breakpoint(struct target *target,
1421 struct breakpoint *breakpoint, uint8_t matchmode)
1422 {
1423 int retval = ERROR_FAIL;
1424 int brp_i = 0;
1425 uint32_t control;
1426 uint8_t byte_addr_select = 0x0F;
1427 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1428 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1429 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1430
1431 if (breakpoint->set) {
1432 LOG_WARNING("breakpoint already set");
1433 return retval;
1434 }
1435 /*check available context BRPs*/
1436 while ((brp_i < cortex_a8->brp_num) &&
1437 (brp_list[brp_i].used || (brp_list[brp_i].type != BRP_CONTEXT)))
1438 brp_i++;
1439
1440 if (brp_i >= cortex_a8->brp_num) {
1441 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1442 return ERROR_FAIL;
1443 }
1444
1445 breakpoint->set = brp_i + 1;
1446 control = ((matchmode & 0x7) << 20)
1447 | (byte_addr_select << 5)
1448 | (3 << 1) | 1;
1449 brp_list[brp_i].used = 1;
1450 brp_list[brp_i].value = (breakpoint->asid);
1451 brp_list[brp_i].control = control;
1452 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1453 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1454 brp_list[brp_i].value);
1455 if (retval != ERROR_OK)
1456 return retval;
1457 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1458 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1459 brp_list[brp_i].control);
1460 if (retval != ERROR_OK)
1461 return retval;
1462 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1463 brp_list[brp_i].control,
1464 brp_list[brp_i].value);
1465 return ERROR_OK;
1466
1467 }
1468
1469 static int cortex_a8_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1470 {
1471 int retval = ERROR_FAIL;
1472 int brp_1 = 0; /* holds the contextID pair */
1473 int brp_2 = 0; /* holds the IVA pair */
1474 uint32_t control_CTX, control_IVA;
1475 uint8_t CTX_byte_addr_select = 0x0F;
1476 uint8_t IVA_byte_addr_select = 0x0F;
1477 uint8_t CTX_machmode = 0x03;
1478 uint8_t IVA_machmode = 0x01;
1479 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1480 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1481 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1482
1483 if (breakpoint->set) {
1484 LOG_WARNING("breakpoint already set");
1485 return retval;
1486 }
1487 /*check available context BRPs*/
1488 while ((brp_1 < cortex_a8->brp_num) &&
1489 (brp_list[brp_1].used || (brp_list[brp_1].type != BRP_CONTEXT)))
1490 brp_1++;
1491
1492 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1493 if (brp_1 >= cortex_a8->brp_num) {
1494 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1495 return ERROR_FAIL;
1496 }
1497
1498 while ((brp_2 < cortex_a8->brp_num) &&
1499 (brp_list[brp_2].used || (brp_list[brp_2].type != BRP_NORMAL)))
1500 brp_2++;
1501
1502 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1503 if (brp_2 >= cortex_a8->brp_num) {
1504 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1505 return ERROR_FAIL;
1506 }
1507
1508 breakpoint->set = brp_1 + 1;
1509 breakpoint->linked_BRP = brp_2;
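/* link the two BRPs: bits [19:16] of each control word name the other BRP,
 * so the IVA match only fires while the context ID also matches */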
1510 control_CTX = ((CTX_machmode & 0x7) << 20)
1511 | (brp_2 << 16)
1512 | (0 << 14)
1513 | (CTX_byte_addr_select << 5)
1514 | (3 << 1) | 1;
1515 brp_list[brp_1].used = 1;
1516 brp_list[brp_1].value = (breakpoint->asid);
1517 brp_list[brp_1].control = control_CTX;
1518 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1519 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1520 brp_list[brp_1].value);
1521 if (retval != ERROR_OK)
1522 return retval;
1523 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1524 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1525 brp_list[brp_1].control);
1526 if (retval != ERROR_OK)
1527 return retval;
1528
1529 control_IVA = ((IVA_machmode & 0x7) << 20)
1530 | (brp_1 << 16)
1531 | (IVA_byte_addr_select << 5)
1532 | (3 << 1) | 1;
1533 brp_list[brp_2].used = 1;
1534 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1535 brp_list[brp_2].control = control_IVA;
1536 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1537 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1538 brp_list[brp_2].value);
1539 if (retval != ERROR_OK)
1540 return retval;
1541 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1542 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1543 brp_list[brp_2].control);
1544 if (retval != ERROR_OK)
1545 return retval;
1546
1547 return ERROR_OK;
1548 }
1549
1550 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1551 {
1552 int retval;
1553 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1554 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1555 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1556
1557 if (!breakpoint->set) {
1558 LOG_WARNING("breakpoint not set");
1559 return ERROR_OK;
1560 }
1561
1562 if (breakpoint->type == BKPT_HARD) {
1563 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1564 int brp_i = breakpoint->set - 1;
1565 int brp_j = breakpoint->linked_BRP;
1566 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num)) {
1567 LOG_DEBUG("Invalid BRP number in breakpoint");
1568 return ERROR_OK;
1569 }
1570 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1571 brp_list[brp_i].control, brp_list[brp_i].value);
1572 brp_list[brp_i].used = 0;
1573 brp_list[brp_i].value = 0;
1574 brp_list[brp_i].control = 0;
1575 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1576 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1577 brp_list[brp_i].control);
1578 if (retval != ERROR_OK)
1579 return retval;
1580 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1581 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1582 brp_list[brp_i].value);
1583 if (retval != ERROR_OK)
1584 return retval;
1585 if ((brp_j < 0) || (brp_j >= cortex_a8->brp_num)) {
1586 LOG_DEBUG("Invalid BRP number in breakpoint");
1587 return ERROR_OK;
1588 }
1589 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1590 brp_list[brp_j].control, brp_list[brp_j].value);
1591 brp_list[brp_j].used = 0;
1592 brp_list[brp_j].value = 0;
1593 brp_list[brp_j].control = 0;
1594 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1595 + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
1596 brp_list[brp_j].control);
1597 if (retval != ERROR_OK)
1598 return retval;
1599 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1600 + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
1601 brp_list[brp_j].value);
1602 if (retval != ERROR_OK)
1603 return retval;
1604 breakpoint->linked_BRP = 0;
1605 breakpoint->set = 0;
1606 return ERROR_OK;
1607
1608 } else {
1609 int brp_i = breakpoint->set - 1;
1610 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num)) {
1611 LOG_DEBUG("Invalid BRP number in breakpoint");
1612 return ERROR_OK;
1613 }
1614 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1615 brp_list[brp_i].control, brp_list[brp_i].value);
1616 brp_list[brp_i].used = 0;
1617 brp_list[brp_i].value = 0;
1618 brp_list[brp_i].control = 0;
1619 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1620 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1621 brp_list[brp_i].control);
1622 if (retval != ERROR_OK)
1623 return retval;
1624 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1625 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1626 brp_list[brp_i].value);
1627 if (retval != ERROR_OK)
1628 return retval;
1629 breakpoint->set = 0;
1630 return ERROR_OK;
1631 }
1632 } else {
1633 /* restore original instruction (kept in target endianness) */
1634 if (breakpoint->length == 4) {
1635 retval = target_write_memory(target,
1636 breakpoint->address & 0xFFFFFFFE,
1637 4, 1, breakpoint->orig_instr);
1638 if (retval != ERROR_OK)
1639 return retval;
1640 } else {
1641 retval = target_write_memory(target,
1642 breakpoint->address & 0xFFFFFFFE,
1643 2, 1, breakpoint->orig_instr);
1644 if (retval != ERROR_OK)
1645 return retval;
1646 }
1647 }
1648 breakpoint->set = 0;
1649
1650 return ERROR_OK;
1651 }
1652
1653 static int cortex_a8_add_breakpoint(struct target *target,
1654 struct breakpoint *breakpoint)
1655 {
1656 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1657
1658 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1)) {
1659 LOG_INFO("no hardware breakpoint available");
1660 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1661 }
1662
1663 if (breakpoint->type == BKPT_HARD)
1664 cortex_a8->brp_num_available--;
1665
1666 return cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1667 }
1668
1669 static int cortex_a8_add_context_breakpoint(struct target *target,
1670 struct breakpoint *breakpoint)
1671 {
1672 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1673
1674 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1)) {
1675 LOG_INFO("no hardware breakpoint available");
1676 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1677 }
1678
1679 if (breakpoint->type == BKPT_HARD)
1680 cortex_a8->brp_num_available--;
1681
1682 return cortex_a8_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1683 }
1684
1685 static int cortex_a8_add_hybrid_breakpoint(struct target *target,
1686 struct breakpoint *breakpoint)
1687 {
1688 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1689
1690 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1)) {
1691 LOG_INFO("no hardware breakpoint available");
1692 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1693 }
1694
1695 if (breakpoint->type == BKPT_HARD)
1696 cortex_a8->brp_num_available--;
1697
1698 return cortex_a8_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1699 }
1700
1701
1702 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1703 {
1704 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1705
1706 #if 0
1707 /* It is perfectly possible to remove breakpoints while the target is running */
1708 if (target->state != TARGET_HALTED) {
1709 LOG_WARNING("target not halted");
1710 return ERROR_TARGET_NOT_HALTED;
1711 }
1712 #endif
1713
1714 if (breakpoint->set) {
1715 cortex_a8_unset_breakpoint(target, breakpoint);
1716 if (breakpoint->type == BKPT_HARD)
1717 cortex_a8->brp_num_available++;
1718 }
1719
1720
1721 return ERROR_OK;
1722 }
1723
1724 /*
1725 * Cortex-A8 Reset functions
1726 */
1727
1728 static int cortex_a8_assert_reset(struct target *target)
1729 {
1730 struct armv7a_common *armv7a = target_to_armv7a(target);
1731
1732 LOG_DEBUG(" ");
1733
1734 /* FIXME when halt is requested, make it work somehow... */
1735
1736 /* Issue some kind of warm reset. */
1737 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1738 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1739 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1740 /* REVISIT handle "pulls" cases, if there's
1741 * hardware that needs them to work.
1742 */
1743 jtag_add_reset(0, 1);
1744 } else {
1745 LOG_ERROR("%s: how to reset?", target_name(target));
1746 return ERROR_FAIL;
1747 }
1748
1749 /* registers are now invalid */
1750 register_cache_invalidate(armv7a->arm.core_cache);
1751
1752 target->state = TARGET_RESET;
1753
1754 return ERROR_OK;
1755 }
1756
1757 static int cortex_a8_deassert_reset(struct target *target)
1758 {
1759 int retval;
1760
1761 LOG_DEBUG(" ");
1762
1763 /* be certain SRST is off */
1764 jtag_add_reset(0, 0);
1765
1766 retval = cortex_a8_poll(target);
1767 if (retval != ERROR_OK)
1768 return retval;
1769
1770 if (target->reset_halt) {
1771 if (target->state != TARGET_HALTED) {
1772 LOG_WARNING("%s: ran after reset and before halt ...",
1773 target_name(target));
1774 retval = target_halt(target);
1775 if (retval != ERROR_OK)
1776 return retval;
1777 }
1778 }
1779
1780 return ERROR_OK;
1781 }
1782
1783 static int cortex_a8_write_apb_ab_memory(struct target *target,
1784 uint32_t address, uint32_t size,
1785 uint32_t count, const uint8_t *buffer)
1786 {
1787 /* write memory through APB-AP */
1788
1789 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1790 struct armv7a_common *armv7a = target_to_armv7a(target);
1791 struct arm *arm = &armv7a->arm;
1792 struct adiv5_dap *swjdp = armv7a->arm.dap;
1793 int total_bytes = count * size;
1794 int total_u32;
1795 int start_byte = address & 0x3;
1796 int end_byte = (address + total_bytes) & 0x3;
1797 struct reg *reg;
1798 uint32_t dscr;
1799 uint8_t *tmp_buff = NULL;
1800
1801 if (target->state != TARGET_HALTED) {
1802 LOG_WARNING("target not halted");
1803 return ERROR_TARGET_NOT_HALTED;
1804 }
1805
1806 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1807
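/* Worked example (illustrative only): address = 0x1001, size = 1,
 * count = 6 covers bytes 0x1001..0x1006, so total_bytes = 6,
 * start_byte = 1, end_byte = 3 and total_u32 = DIV_ROUND_UP(1 + 6, 4) = 2;
 * the two aligned words at 0x1000 and 0x1004 span the whole access.
 */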
1808 /* Mark register R0 as dirty, as it will be used
1809 * for transferring the data.
1810 * It will be restored automatically when exiting
1811 * debug mode
1812 */
1813 reg = arm_reg_current(arm, 0);
1814 reg->dirty = true;
1815
1816 /* clear any abort */
1817 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1818 if (retval != ERROR_OK)
1819 return retval;
1820
1821 /* This algorithm comes from either:
1822 * Cortex-A8 TRM Example 12-25
1823 * Cortex-R4 TRM Example 11-26
1824 * (slight differences)
1825 */
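/* Outline of the sequence below (following the TRM examples cited above):
 *   1. the DSCR is switched to fast DCC mode
 *   2. MRC p14, 0, R0, c5, c0 is loaded into the ITR and fires when the
 *      word-aligned destination address is written to DTRRX, leaving the
 *      address in R0
 *   3. STC p14, c5, [R0], #4 is then placed in the ITR and re-issued by
 *      every host write to DTRRX, storing one word and advancing R0
 *   4. non-blocking mode is restored and the sticky abort flags checked
 */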
1826
1827 /* The algorithm only copies 32 bit words, so the buffer
1828 * must be expanded to cover whole words at either end.
1829 * Any partially covered first or last word is read back first
1830 * so the bytes outside the requested range are not corrupted.
1831 */
1832 tmp_buff = (uint8_t *) malloc(total_u32 << 2);
1833
1834
1835 if ((start_byte != 0) && (total_u32 > 1)) {
1836 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1837 * the other bytes in the word.
1838 */
1839 retval = cortex_a8_read_apb_ab_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1840 if (retval != ERROR_OK)
1841 goto error_free_buff_w;
1842 }
1843
1844 /* If end of write is not aligned, or the write is less than 4 bytes */
1845 if ((end_byte != 0) ||
1846 ((total_u32 == 1) && (total_bytes != 4))) {
1847
1848 /* Read the last word to avoid corruption during 32 bit write */
1849 int mem_offset = (total_u32-1) << 2; /* byte offset of the last 32 bit word */
1850 retval = cortex_a8_read_apb_ab_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1851 if (retval != ERROR_OK)
1852 goto error_free_buff_w;
1853 }
1854
1855 /* Copy the write buffer over the top of the temporary buffer */
1856 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1857
1858 /* We now have a 32 bit aligned buffer that can be written */
1859
1860 /* Read DSCR */
1861 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1862 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1863 if (retval != ERROR_OK)
1864 goto error_free_buff_w;
1865
1866 /* Set DTR mode to Fast (2) */
1867 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_FAST_MODE;
1868 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1869 armv7a->debug_base + CPUDBG_DSCR, dscr);
1870 if (retval != ERROR_OK)
1871 goto error_free_buff_w;
1872
1873 /* Copy the destination address into R0 */
1874 /* - pend an instruction MRC p14, 0, R0, c5, c0 */
1875 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1876 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
1877 if (retval != ERROR_OK)
1878 goto error_unset_dtr_w;
1879 /* Write address into DTRRX, which triggers previous instruction */
1880 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1881 armv7a->debug_base + CPUDBG_DTRRX, address & (~0x3));
1882 if (retval != ERROR_OK)
1883 goto error_unset_dtr_w;
1884
1885 /* Write the data transfer instruction into the ITR
1886 * (STC p14, c5, [R0], 4)
1887 */
1888 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1889 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
1890 if (retval != ERROR_OK)
1891 goto error_unset_dtr_w;
1892
1893 /* Do the write */
1894 retval = mem_ap_sel_write_buf_u32_noincr(swjdp, armv7a->debug_ap,
1895 tmp_buff, (total_u32)<<2, armv7a->debug_base + CPUDBG_DTRRX);
1896 if (retval != ERROR_OK)
1897 goto error_unset_dtr_w;
1898
1899
1900 /* Switch DTR mode back to non-blocking (0) */
1901 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1902 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1903 armv7a->debug_base + CPUDBG_DSCR, dscr);
1904 if (retval != ERROR_OK)
1905 goto error_unset_dtr_w;
1906
1907 /* Check for sticky abort flags in the DSCR */
1908 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1909 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1910 if (retval != ERROR_OK)
1911 goto error_free_buff_w;
1912 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
1913 /* Abort occurred - clear it and exit */
1914 LOG_ERROR("abort occurred - dscr = 0x%08x", dscr);
1915 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1916 armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1917 goto error_free_buff_w;
1918 }
1919
1920 /* Done */
1921 free(tmp_buff);
1922 return ERROR_OK;
1923
1924 error_unset_dtr_w:
1925 /* Unset DTR mode */
1926 mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1927 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1928 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1929 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1930 armv7a->debug_base + CPUDBG_DSCR, dscr);
1931 error_free_buff_w:
1932 LOG_ERROR("error");
1933 free(tmp_buff);
1934 return ERROR_FAIL;
1935 }
1936
1937 static int cortex_a8_read_apb_ab_memory(struct target *target,
1938 uint32_t address, uint32_t size,
1939 uint32_t count, uint8_t *buffer)
1940 {
1941 /* read memory through APB-AP */
1942
1943 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1944 struct armv7a_common *armv7a = target_to_armv7a(target);
1945 struct adiv5_dap *swjdp = armv7a->arm.dap;
1946 struct arm *arm = &armv7a->arm;
1947 int total_bytes = count * size;
1948 int total_u32;
1949 int start_byte = address & 0x3;
1950 struct reg *reg;
1951 uint32_t dscr;
1952 char *tmp_buff = NULL;
1953 uint32_t buff32[2];
1954 if (target->state != TARGET_HALTED) {
1955 LOG_WARNING("target not halted");
1956 return ERROR_TARGET_NOT_HALTED;
1957 }
1958
1959 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1960
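/* For reads only the start alignment matters: whole aligned words are
 * read into a temporary buffer and total_bytes are then copied out from
 * offset start_byte (e.g. address = 0x1002, size = 1, count = 3 gives
 * start_byte = 2 and total_u32 = DIV_ROUND_UP(2 + 3, 4) = 2).
 */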
1961 /* Mark register R0 as dirty, as it will be used
1962 * for transferring the data.
1963 * It will be restored automatically when exiting
1964 * debug mode
1965 */
1966 reg = arm_reg_current(arm, 0);
1967 reg->dirty = true;
1968
1969 /* clear any abort */
1970 retval =
1971 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1972 if (retval != ERROR_OK)
1973 return retval;
1974
1975 /* Read DSCR */
1976 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1977 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1978
1979 /* This algorithm comes from either:
1980 * Cortex-A8 TRM Example 12-24
1981 * Cortex-R4 TRM Example 11-25
1982 * (slight differences)
1983 */
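/* Outline of the sequence below (following the TRM examples cited above):
 *   1. stall mode: the word-aligned address is written to DTRRX and
 *      MRC p14, 0, R0, c5, c0 copies it into R0
 *   2. fast mode: LDC p14, c5, [R0], #4 is placed in the ITR and is
 *      re-issued by each host read of DTRTX, streaming memory out
 *   3. non-blocking mode is restored before the final word is drained
 *      from DTRTX and the sticky abort flags are checked
 */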
1984
1985 /* Set DTR access mode to stall mode b01 */
1986 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_STALL_MODE;
1987 retval += mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1988 armv7a->debug_base + CPUDBG_DSCR, dscr);
1989
1990 /* Write R0 with value 'address' using write procedure for stall mode */
1991 /* - Write the address for read access into DTRRX */
1992 retval += mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1993 armv7a->debug_base + CPUDBG_DTRRX, address & ~0x3);
1994 /* - Copy value from DTRRX to R0 using instruction mrc p14, 0, r0, c5, c0 */
1995 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
1996
1997
1998 /* Write the data transfer instruction (ldc p14, c5, [r0],4)
1999 * and the DTR mode setting to fast mode
2000 * in one combined write (since they are adjacent registers)
2001 */
2002 buff32[0] = ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4);
2003 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_FAST_MODE;
2004 buff32[1] = dscr;
2005 /* group the two accesses to CPUDBG_ITR (0x84) and CPUDBG_DSCR (0x88) */
2006 retval += mem_ap_sel_write_buf_u32(swjdp, armv7a->debug_ap, (uint8_t *)buff32, 8,
2007 armv7a->debug_base + CPUDBG_ITR);
2008 if (retval != ERROR_OK)
2009 goto error_unset_dtr_r;
2010
2011
2012 /* Because of the word alignment of the start address, the caller's
2013 * buffer may not have room for the full first and last 32 bit words,
2014 * so read into a word aligned temporary buffer and then copy the
2015 * requested bytes out of it. */
2016 tmp_buff = (char *) malloc(total_u32<<2);
2017
2018 /* The last word needs to be handled separately - read all other words in one go.
2019 */
2020 if (total_u32 > 1) {
2021 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
2022 * Abort flags are sticky, so can be read at end of transactions
2023 *
2024 * This data is read in aligned to 32 bit boundary, hence may need shifting later.
2025 */
2026 retval = mem_ap_sel_read_buf_u32_noincr(swjdp, armv7a->debug_ap, (uint8_t *)tmp_buff, (total_u32-1)<<2,
2027 armv7a->debug_base + CPUDBG_DTRTX);
2028 if (retval != ERROR_OK)
2029 goto error_unset_dtr_r;
2030 }
2031
2032 /* set DTR access mode back to non blocking b00 */
2033 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
2034 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2035 armv7a->debug_base + CPUDBG_DSCR, dscr);
2036 if (retval != ERROR_OK)
2037 goto error_free_buff_r;
2038
2039 /* Wait for the final read instruction to finish */
2040 do {
2041 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2042 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2043 if (retval != ERROR_OK)
2044 goto error_free_buff_r;
2045 } while ((dscr & DSCR_INSTR_COMP) == 0);
2046
2047
2048 /* Check for sticky abort flags in the DSCR */
2049 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2050 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2051 if (retval != ERROR_OK)
2052 goto error_free_buff_r;
2053 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2054 /* Abort occurred - clear it and exit */
2055 LOG_ERROR("abort occurred - dscr = 0x%08x", dscr);
2056 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2057 armv7a->debug_base + CPUDBG_DRCR, 1<<2);
2058 goto error_free_buff_r;
2059 }
2060
2061 /* Read the last word */
2062 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2063 armv7a->debug_base + CPUDBG_DTRTX, (uint32_t *)&tmp_buff[(total_u32-1)<<2]);
2064 if (retval != ERROR_OK)
2065 goto error_free_buff_r;
2066
2067 /* Copy and align the data into the output buffer */
2068 memcpy(buffer, &tmp_buff[start_byte], total_bytes);
2069
2070 free(tmp_buff);
2071
2072 /* Done */
2073 return ERROR_OK;
2074
2075
2076 error_unset_dtr_r:
2077 /* Unset DTR mode */
2078 mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2079 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2080 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
2081 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2082 armv7a->debug_base + CPUDBG_DSCR, dscr);
2083 error_free_buff_r:
2084 LOG_ERROR("error");
2085 free(tmp_buff);
2086 return ERROR_FAIL;
2087 }
2088
2089
2090 /*
2091 * Cortex-A8 Memory access
2092 *
2093 * This is the same as for the Cortex-M3, but we must also use the
2094 * correct AP number for every access.
2095 */
2096
2097 static int cortex_a8_read_phys_memory(struct target *target,
2098 uint32_t address, uint32_t size,
2099 uint32_t count, uint8_t *buffer)
2100 {
2101 struct armv7a_common *armv7a = target_to_armv7a(target);
2102 struct adiv5_dap *swjdp = armv7a->arm.dap;
2103 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2104 uint8_t apsel = swjdp->apsel;
2105 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d",
2106 address, size, count);
2107
2108 if (count && buffer) {
2109
2110 if (apsel == swjdp_memoryap) {
2111
2112 /* read memory through AHB-AP */
2113
2114 switch (size) {
2115 case 4:
2116 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
2117 buffer, 4 * count, address);
2118 break;
2119 case 2:
2120 retval = mem_ap_sel_read_buf_u16(swjdp, swjdp_memoryap,
2121 buffer, 2 * count, address);
2122 break;
2123 case 1:
2124 retval = mem_ap_sel_read_buf_u8(swjdp, swjdp_memoryap,
2125 buffer, count, address);
2126 break;
2127 }
2128 } else {
2129
2130 /* read memory through APB-AP
2131 * disable mmu */
2132 retval = cortex_a8_mmu_modify(target, 0);
2133 if (retval != ERROR_OK)
2134 return retval;
2135 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
2136 }
2137 }
2138 return retval;
2139 }
2140
2141 static int cortex_a8_read_memory(struct target *target, uint32_t address,
2142 uint32_t size, uint32_t count, uint8_t *buffer)
2143 {
2144 int enabled = 0;
2145 uint32_t virt, phys;
2146 int retval;
2147 struct armv7a_common *armv7a = target_to_armv7a(target);
2148 struct adiv5_dap *swjdp = armv7a->arm.dap;
2149 uint8_t apsel = swjdp->apsel;
2150
2151 /* cortex_a8 handles unaligned memory access */
2152 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
2153 size, count);
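/* With the AHB-AP (swjdp_memoryap) selected the access goes directly to
 * the bus, so a virtual address must be translated to physical first;
 * the APB-AP path below performs the access on the core itself and
 * therefore uses virtual addresses with the MMU left enabled.
 */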
2154 if (apsel == swjdp_memoryap) {
2155 retval = cortex_a8_mmu(target, &enabled);
2156 if (retval != ERROR_OK)
2157 return retval;
2158
2159
2160 if (enabled) {
2161 virt = address;
2162 retval = cortex_a8_virt2phys(target, virt, &phys);
2163 if (retval != ERROR_OK)
2164 return retval;
2165
2166 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x",
2167 virt, phys);
2168 address = phys;
2169 }
2170 retval = cortex_a8_read_phys_memory(target, address, size, count, buffer);
2171 } else {
2172 retval = cortex_a8_check_address(target, address);
2173 if (retval != ERROR_OK)
2174 return retval;
2175 /* enable mmu */
2176 retval = cortex_a8_mmu_modify(target, 1);
2177 if (retval != ERROR_OK)
2178 return retval;
2179 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
2180 }
2181 return retval;
2182 }
2183
2184 static int cortex_a8_write_phys_memory(struct target *target,
2185 uint32_t address, uint32_t size,
2186 uint32_t count, const uint8_t *buffer)
2187 {
2188 struct armv7a_common *armv7a = target_to_armv7a(target);
2189 struct adiv5_dap *swjdp = armv7a->arm.dap;
2190 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2191 uint8_t apsel = swjdp->apsel;
2192
2193 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address,
2194 size, count);
2195
2196 if (count && buffer) {
2197
2198 if (apsel == swjdp_memoryap) {
2199
2200 /* write memory through AHB-AP */
2201
2202 switch (size) {
2203 case 4:
2204 retval = mem_ap_sel_write_buf_u32(swjdp, swjdp_memoryap,
2205 buffer, 4 * count, address);
2206 break;
2207 case 2:
2208 retval = mem_ap_sel_write_buf_u16(swjdp, swjdp_memoryap,
2209 buffer, 2 * count, address);
2210 break;
2211 case 1:
2212 retval = mem_ap_sel_write_buf_u8(swjdp, swjdp_memoryap,
2213 buffer, count, address);
2214 break;
2215 }
2216
2217 } else {
2218
2219 /* write memory through APB-AP */
2220 retval = cortex_a8_mmu_modify(target, 0);
2221 if (retval != ERROR_OK)
2222 return retval;
2223 return cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
2224 }
2225 }
2226
2227
2228 /* REVISIT this op is generic ARMv7-A/R stuff */
2229 if (retval == ERROR_OK && target->state == TARGET_HALTED) {
2230 struct arm_dpm *dpm = armv7a->arm.dpm;
2231
2232 retval = dpm->prepare(dpm);
2233 if (retval != ERROR_OK)
2234 return retval;
2235
2236 /* Cache handling will NOT work with the MMU active: the
2237 * wrong addresses would be invalidated!
2238 *
2239 * For both ICache and DCache, walk all cache lines in the
2240 * address range. Cortex-A8 has fixed 64 byte line length.
2241 *
2242 * REVISIT per ARMv7, these may trigger watchpoints ...
2243 */
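/* The loop starts below are rounded down to a 64 byte line boundary so
 * that a range whose tail straddles one more cache line is still fully
 * covered.
 */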
2244
2245 /* invalidate I-Cache */
2246 if (armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled) {
2247 /* ICIMVAU - Invalidate Cache single entry
2248 * with MVA to PoU
2249 * MCR p15, 0, r0, c7, c5, 1
2250 */
2251 for (uint32_t cacheline = address & ~0x3f;
2252 cacheline < address + size * count;
2253 cacheline += 64) {
2254 retval = dpm->instr_write_data_r0(dpm,
2255 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2256 cacheline);
2257 if (retval != ERROR_OK)
2258 return retval;
2259 }
2260 }
2261
2262 /* invalidate D-Cache */
2263 if (armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled) {
2264 /* DCIMVAC - Invalidate data Cache line
2265 * with MVA to PoC
2266 * MCR p15, 0, r0, c7, c6, 1
2267 */
2268 for (uint32_t cacheline = address & ~0x3f;
2269 cacheline < address + size * count;
2270 cacheline += 64) {
2271 retval = dpm->instr_write_data_r0(dpm,
2272 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2273 cacheline);
2274 if (retval != ERROR_OK)
2275 return retval;
2276 }
2277 }
2278
2279 /* (void) */ dpm->finish(dpm);
2280 }
2281
2282 return retval;
2283 }
2284
2285 static int cortex_a8_write_memory(struct target *target, uint32_t address,
2286 uint32_t size, uint32_t count, const uint8_t *buffer)
2287 {
2288 int enabled = 0;
2289 uint32_t virt, phys;
2290 int retval;
2291 struct armv7a_common *armv7a = target_to_armv7a(target);
2292 struct adiv5_dap *swjdp = armv7a->arm.dap;
2293 uint8_t apsel = swjdp->apsel;
2294 /* cortex_a8 handles unaligned memory access */
2295 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
2296 size, count);
2297 if (apsel == swjdp_memoryap) {
2298
2299 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size,
2300 count);
2301 retval = cortex_a8_mmu(target, &enabled);
2302 if (retval != ERROR_OK)
2303 return retval;
2304
2305 if (enabled) {
2306 virt = address;
2307 retval = cortex_a8_virt2phys(target, virt, &phys);
2308 if (retval != ERROR_OK)
2309 return retval;
2310 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x",
2311 virt,
2312 phys);
2313 address = phys;
2314 }
2315
2316 retval = cortex_a8_write_phys_memory(target, address, size,
2317 count, buffer);
2318 } else {
2319 retval = cortex_a8_check_address(target, address);
2320 if (retval != ERROR_OK)
2321 return retval;
2322 /* enable mmu */
2323 retval = cortex_a8_mmu_modify(target, 1);
2324 if (retval != ERROR_OK)
2325 return retval;
2326 retval = cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
2327 }
2328 return retval;
2329 }
2330
2331 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
2332 uint32_t count, const uint8_t *buffer)
2333 {
2334 return cortex_a8_write_memory(target, address, 4, count, buffer);
2335 }
2336
2337 static int cortex_a8_handle_target_request(void *priv)
2338 {
2339 struct target *target = priv;
2340 struct armv7a_common *armv7a = target_to_armv7a(target);
2341 struct adiv5_dap *swjdp = armv7a->arm.dap;
2342 int retval;
2343
2344 if (!target_was_examined(target))
2345 return ERROR_OK;
2346 if (!target->dbg_msg_enabled)
2347 return ERROR_OK;
2348
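/* Poll the DCC while the core runs: as long as DSCR.DTRTXfull is set,
 * each word read from DTRTX is a pending request from the target and is
 * handed to the generic target_request() dispatcher.
 */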
2349 if (target->state == TARGET_RUNNING) {
2350 uint32_t request;
2351 uint32_t dscr;
2352 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2353 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2354
2355 /* check if we have data */
2356 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2357 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2358 armv7a->debug_base + CPUDBG_DTRTX, &request);
2359 if (retval == ERROR_OK) {
2360 target_request(target, request);
2361 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2362 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2363 }
2364 }
2365 }
2366
2367 return ERROR_OK;
2368 }
2369
2370 /*
2371 * Cortex-A8 target information and configuration
2372 */
2373
2374 static int cortex_a8_examine_first(struct target *target)
2375 {
2376 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2377 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2378 struct adiv5_dap *swjdp = armv7a->arm.dap;
2379 int i;
2380 int retval = ERROR_OK;
2381 uint32_t didr, ctypr, ttypr, cpuid;
2382
2383 /* Instead of relying on an extra read to make sure the DAP is
2384 * configured, call ahbap_debugport_init(swjdp) explicitly.
2385 */
2386 retval = ahbap_debugport_init(swjdp);
2387 if (retval != ERROR_OK)
2388 return retval;
2389
2390 if (!target->dbgbase_set) {
2391 uint32_t dbgbase;
2392 /* Get ROM Table base */
2393 uint32_t apid;
2394 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
2395 if (retval != ERROR_OK)
2396 return retval;
2397 /* Lookup 0x15 -- Processor DAP */
2398 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
2399 &armv7a->debug_base);
2400 if (retval != ERROR_OK)
2401 return retval;
2402 } else
2403 armv7a->debug_base = target->dbgbase;
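/* armv7a->debug_base now points at the core debug registers, either
 * looked up through the ROM table on AP #1 or taken from the dbgbase
 * value supplied in the target configuration.
 */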
2404
2405 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2406 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2407 if (retval != ERROR_OK)
2408 return retval;
2409
2410 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2411 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2412 if (retval != ERROR_OK) {
2413 LOG_DEBUG("Examine %s failed", "CPUID");
2414 return retval;
2415 }
2416
2417 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2418 armv7a->debug_base + CPUDBG_CTYPR, &ctypr);
2419 if (retval != ERROR_OK) {
2420 LOG_DEBUG("Examine %s failed", "CTYPR");
2421 return retval;
2422 }
2423
2424 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2425 armv7a->debug_base + CPUDBG_TTYPR, &ttypr);
2426 if (retval != ERROR_OK) {
2427 LOG_DEBUG("Examine %s failed", "TTYPR");
2428 return retval;
2429 }
2430
2431 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2432 armv7a->debug_base + CPUDBG_DIDR, &didr);
2433 if (retval != ERROR_OK) {
2434 LOG_DEBUG("Examine %s failed", "DIDR");
2435 return retval;
2436 }
2437
2438 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2439 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2440 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2441 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2442
2443 armv7a->arm.core_type = ARM_MODE_MON;
2444 retval = cortex_a8_dpm_setup(cortex_a8, didr);
2445 if (retval != ERROR_OK)
2446 return retval;
2447
2448 /* Setup Breakpoint Register Pairs */
2449 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
2450 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
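/* For example, DIDR[27:24] = 0x5 and DIDR[23:20] = 0x1 decode to
 * brp_num = 6 and brp_num_context = 2; the loop below assigns the
 * context-capable pairs to the highest-numbered slots.
 */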
2451 cortex_a8->brp_num_available = cortex_a8->brp_num;
2452 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
2453 /* cortex_a8->brb_enabled = ????; */
2454 for (i = 0; i < cortex_a8->brp_num; i++) {
2455 cortex_a8->brp_list[i].used = 0;
2456 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
2457 cortex_a8->brp_list[i].type = BRP_NORMAL;
2458 else
2459 cortex_a8->brp_list[i].type = BRP_CONTEXT;
2460 cortex_a8->brp_list[i].value = 0;
2461 cortex_a8->brp_list[i].control = 0;
2462 cortex_a8->brp_list[i].BRPn = i;
2463 }
2464
2465 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
2466
2467 target_set_examined(target);
2468 return ERROR_OK;
2469 }
2470
2471 static int cortex_a8_examine(struct target *target)
2472 {
2473 int retval = ERROR_OK;
2474
2475 /* don't re-probe hardware after each reset */
2476 if (!target_was_examined(target))
2477 retval = cortex_a8_examine_first(target);
2478
2479 /* Configure core debug access */
2480 if (retval == ERROR_OK)
2481 retval = cortex_a8_init_debug_access(target);
2482
2483 return retval;
2484 }
2485
2486 /*
2487 * Cortex-A8 target creation and initialization
2488 */
2489
2490 static int cortex_a8_init_target(struct command_context *cmd_ctx,
2491 struct target *target)
2492 {
2493 /* examine_first() does a bunch of this */
2494 return ERROR_OK;
2495 }
2496
2497 static int cortex_a8_init_arch_info(struct target *target,
2498 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
2499 {
2500 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2501 struct adiv5_dap *dap = &armv7a->dap;
2502
2503 armv7a->arm.dap = dap;
2504
2505 /* Setup struct cortex_a8_common */
2506 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
2507 /* tap has no dap initialized */
2508 if (!tap->dap) {
2509 armv7a->arm.dap = dap;
2510 /* Setup struct cortex_a8_common */
2511
2512 /* prepare JTAG information for the new target */
2513 cortex_a8->jtag_info.tap = tap;
2514 cortex_a8->jtag_info.scann_size = 4;
2515
2516 /* Leave (only) generic DAP stuff for debugport_init() */
2517 dap->jtag_info = &cortex_a8->jtag_info;
2518
2519 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
2520 dap->tar_autoincr_block = (1 << 10);
2521 dap->memaccess_tck = 80;
2522 tap->dap = dap;
2523 } else
2524 armv7a->arm.dap = tap->dap;
2525
2526 cortex_a8->fast_reg_read = 0;
2527
2528 /* register arch-specific functions */
2529 armv7a->examine_debug_reason = NULL;
2530
2531 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
2532
2533 armv7a->pre_restore_context = NULL;
2534
2535 armv7a->armv7a_mmu.read_physical_memory = cortex_a8_read_phys_memory;
2536
2537
2538 /* arm7_9->handle_target_request = cortex_a8_handle_target_request; */
2539
2540 /* REVISIT v7a setup should be in a v7a-specific routine */
2541 armv7a_init_arch_info(target, armv7a);
2542 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
2543
2544 return ERROR_OK;
2545 }
2546
2547 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
2548 {
2549 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
2550
2551 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
2552 }
2553
2554
2555
2556 static int cortex_a8_mmu(struct target *target, int *enabled)
2557 {
2558 if (target->state != TARGET_HALTED) {
2559 LOG_ERROR("%s: target not halted", __func__);
2560 return ERROR_TARGET_INVALID;
2561 }
2562
2563 *enabled = target_to_cortex_a8(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2564 return ERROR_OK;
2565 }
2566
2567 static int cortex_a8_virt2phys(struct target *target,
2568 uint32_t virt, uint32_t *phys)
2569 {
2570 int retval = ERROR_FAIL;
2571 struct armv7a_common *armv7a = target_to_armv7a(target);
2572 struct adiv5_dap *swjdp = armv7a->arm.dap;
2573 uint8_t apsel = swjdp->apsel;
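/* Two translation paths: with the AHB-AP selected the address is resolved
 * with armv7a_mmu_translate_va(), a software walk of the translation
 * tables; on the APB-AP path the MMU is forced on and the core performs
 * the translation itself via armv7a_mmu_translate_va_pa() (CP15 ATS).
 */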
2574 if (apsel == swjdp_memoryap) {
2575 uint32_t ret;
2576 retval = armv7a_mmu_translate_va(target,
2577 virt, &ret);
2578 if (retval != ERROR_OK)
2579 goto done;
2580 *phys = ret;
2581 } else {/* use this method if swjdp_memoryap is not selected;
2582 * the MMU must be enabled in order to get a correct translation */
2583 retval = cortex_a8_mmu_modify(target, 1);
2584 if (retval != ERROR_OK)
2585 goto done;
2586 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
2587 }
2588 done:
2589 return retval;
2590 }
2591
2592 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
2593 {
2594 struct target *target = get_current_target(CMD_CTX);
2595 struct armv7a_common *armv7a = target_to_armv7a(target);
2596
2597 return armv7a_handle_cache_info_command(CMD_CTX,
2598 &armv7a->armv7a_mmu.armv7a_cache);
2599 }
2600
2601
2602 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
2603 {
2604 struct target *target = get_current_target(CMD_CTX);
2605 if (!target_was_examined(target)) {
2606 LOG_ERROR("target not examined yet");
2607 return ERROR_FAIL;
2608 }
2609
2610 return cortex_a8_init_debug_access(target);
2611 }
2612 COMMAND_HANDLER(cortex_a8_handle_smp_off_command)
2613 {
2614 struct target *target = get_current_target(CMD_CTX);
2615 /* check target is an smp target */
2616 struct target_list *head;
2617 struct target *curr;
2618 head = target->head;
2619 target->smp = 0;
2620 if (head != (struct target_list *)NULL) {
2621 while (head != (struct target_list *)NULL) {
2622 curr = head->target;
2623 curr->smp = 0;
2624 head = head->next;
2625 }
2626 /* point the gdb service back at this target */
2627 target->gdb_service->target = target;
2628 }
2629 return ERROR_OK;
2630 }
2631
2632 COMMAND_HANDLER(cortex_a8_handle_smp_on_command)
2633 {
2634 struct target *target = get_current_target(CMD_CTX);
2635 struct target_list *head;
2636 struct target *curr;
2637 head = target->head;
2638 if (head != (struct target_list *)NULL) {
2639 target->smp = 1;
2640 while (head != (struct target_list *)NULL) {
2641 curr = head->target;
2642 curr->smp = 1;
2643 head = head->next;
2644 }
2645 }
2646 return ERROR_OK;
2647 }
2648
2649 COMMAND_HANDLER(cortex_a8_handle_smp_gdb_command)
2650 {
2651 struct target *target = get_current_target(CMD_CTX);
2652 int retval = ERROR_OK;
2653 struct target_list *head;
2654 head = target->head;
2655 if (head != (struct target_list *)NULL) {
2656 if (CMD_ARGC == 1) {
2657 int coreid = 0;
2658 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2659 if (ERROR_OK != retval)
2660 return retval;
2661 target->gdb_service->core[1] = coreid;
2662
2663 }
2664 command_print(CMD_CTX, "gdb coreid %d -> %d", target->gdb_service->core[0]
2665 , target->gdb_service->core[1]);
2666 }
2667 return ERROR_OK;
2668 }
2669
2670 static const struct command_registration cortex_a8_exec_command_handlers[] = {
2671 {
2672 .name = "cache_info",
2673 .handler = cortex_a8_handle_cache_info_command,
2674 .mode = COMMAND_EXEC,
2675 .help = "display information about target caches",
2676 .usage = "",
2677 },
2678 {
2679 .name = "dbginit",
2680 .handler = cortex_a8_handle_dbginit_command,
2681 .mode = COMMAND_EXEC,
2682 .help = "Initialize core debug",
2683 .usage = "",
2684 },
2685 { .name = "smp_off",
2686 .handler = cortex_a8_handle_smp_off_command,
2687 .mode = COMMAND_EXEC,
2688 .help = "Stop smp handling",
2689 .usage = "",},
2690 {
2691 .name = "smp_on",
2692 .handler = cortex_a8_handle_smp_on_command,
2693 .mode = COMMAND_EXEC,
2694 .help = "Restart smp handling",
2695 .usage = "",
2696 },
2697 {
2698 .name = "smp_gdb",
2699 .handler = cortex_a8_handle_smp_gdb_command,
2700 .mode = COMMAND_EXEC,
2701 .help = "display/fix current core played to gdb",
2702 .usage = "",
2703 },
2704
2705
2706 COMMAND_REGISTRATION_DONE
2707 };
2708 static const struct command_registration cortex_a8_command_handlers[] = {
2709 {
2710 .chain = arm_command_handlers,
2711 },
2712 {
2713 .chain = armv7a_command_handlers,
2714 },
2715 {
2716 .name = "cortex_a8",
2717 .mode = COMMAND_ANY,
2718 .help = "Cortex-A8 command group",
2719 .usage = "",
2720 .chain = cortex_a8_exec_command_handlers,
2721 },
2722 COMMAND_REGISTRATION_DONE
2723 };
2724
2725 struct target_type cortexa8_target = {
2726 .name = "cortex_a8",
2727
2728 .poll = cortex_a8_poll,
2729 .arch_state = armv7a_arch_state,
2730
2731 .target_request_data = NULL,
2732
2733 .halt = cortex_a8_halt,
2734 .resume = cortex_a8_resume,
2735 .step = cortex_a8_step,
2736
2737 .assert_reset = cortex_a8_assert_reset,
2738 .deassert_reset = cortex_a8_deassert_reset,
2739 .soft_reset_halt = NULL,
2740
2741 /* REVISIT allow exporting VFP3 registers ... */
2742 .get_gdb_reg_list = arm_get_gdb_reg_list,
2743
2744 .read_memory = cortex_a8_read_memory,
2745 .write_memory = cortex_a8_write_memory,
2746 .bulk_write_memory = cortex_a8_bulk_write_memory,
2747
2748 .checksum_memory = arm_checksum_memory,
2749 .blank_check_memory = arm_blank_check_memory,
2750
2751 .run_algorithm = armv4_5_run_algorithm,
2752
2753 .add_breakpoint = cortex_a8_add_breakpoint,
2754 .add_context_breakpoint = cortex_a8_add_context_breakpoint,
2755 .add_hybrid_breakpoint = cortex_a8_add_hybrid_breakpoint,
2756 .remove_breakpoint = cortex_a8_remove_breakpoint,
2757 .add_watchpoint = NULL,
2758 .remove_watchpoint = NULL,
2759
2760 .commands = cortex_a8_command_handlers,
2761 .target_create = cortex_a8_target_create,
2762 .init_target = cortex_a8_init_target,
2763 .examine = cortex_a8_examine,
2764
2765 .read_phys_memory = cortex_a8_read_phys_memory,
2766 .write_phys_memory = cortex_a8_write_phys_memory,
2767 .mmu = cortex_a8_mmu,
2768 .virt2phys = cortex_a8_virt2phys,
2769 };
