cortex_m3: use armv7m's async algorithm implementation
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * This program is free software; you can redistribute it and/or modify *
21 * it under the terms of the GNU General Public License as published by *
22 * the Free Software Foundation; either version 2 of the License, or *
23 * (at your option) any later version. *
24 * *
25 * This program is distributed in the hope that it will be useful, *
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
28 * GNU General Public License for more details. *
29 * *
30 * You should have received a copy of the GNU General Public License *
31 * along with this program; if not, write to the *
32 * Free Software Foundation, Inc., *
33 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
34 * *
35 * Cortex-A8(tm) TRM, ARM DDI 0344H *
36 * Cortex-A9(tm) TRM, ARM DDI 0407F *
37 * *
38 ***************************************************************************/
39 #ifdef HAVE_CONFIG_H
40 #include "config.h"
41 #endif
42
43 #include "breakpoints.h"
44 #include "cortex_a.h"
45 #include "register.h"
46 #include "target_request.h"
47 #include "target_type.h"
48 #include "arm_opcodes.h"
49 #include <helper/time_support.h>
50
51 static int cortex_a8_poll(struct target *target);
52 static int cortex_a8_debug_entry(struct target *target);
53 static int cortex_a8_restore_context(struct target *target, bool bpwp);
54 static int cortex_a8_set_breakpoint(struct target *target,
55 struct breakpoint *breakpoint, uint8_t matchmode);
56 static int cortex_a8_set_context_breakpoint(struct target *target,
57 struct breakpoint *breakpoint, uint8_t matchmode);
58 static int cortex_a8_set_hybrid_breakpoint(struct target *target,
59 struct breakpoint *breakpoint);
60 static int cortex_a8_unset_breakpoint(struct target *target,
61 struct breakpoint *breakpoint);
62 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
63 uint32_t *value, int regnum);
64 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
65 uint32_t value, int regnum);
66 static int cortex_a8_mmu(struct target *target, int *enabled);
67 static int cortex_a8_virt2phys(struct target *target,
68 uint32_t virt, uint32_t *phys);
69
70 /*
71 * FIXME do topology discovery using the ROM; don't
72 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
73 * cores, with different AP numbering ... don't use a #define
74 * for these numbers, use per-core armv7a state.
75 */
76 #define swjdp_memoryap 0
77 #define swjdp_debugap 1
78
79 /* restore cp15_control_reg at resume */
80 static int cortex_a8_restore_cp15_control_reg(struct target* target)
81 {
82 int retval = ERROR_OK;
83 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
84 struct armv7a_common *armv7a = target_to_armv7a(target);
85
86 if (cortex_a8->cp15_control_reg !=cortex_a8->cp15_control_reg_curr)
87 {
88 cortex_a8->cp15_control_reg_curr = cortex_a8->cp15_control_reg;
89 //LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
90 retval = armv7a->armv4_5_common.mcr(target, 15,
91 0, 0, /* op1, op2 */
92 1, 0, /* CRn, CRm */
93 cortex_a8->cp15_control_reg);
94 }
95 return ERROR_OK;
96 }
97
98 /* check address before cortex_a8_apb read write access with mmu on
99 * remove apb predictible data abort */
100 static int cortex_a8_check_address(struct target *target, uint32_t address)
101 {
102 struct armv7a_common *armv7a = target_to_armv7a(target);
103 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
104 uint32_t os_border = armv7a->armv7a_mmu.os_border;
105 if ((address < os_border) &&
106 (armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)){
107 LOG_ERROR("%x access in userspace and target in supervisor",address);
108 return ERROR_FAIL;
109 }
110 if ((address >= os_border) &&
111 ( cortex_a8->curr_mode != ARM_MODE_SVC)){
112 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
113 cortex_a8->curr_mode = ARM_MODE_SVC;
114 LOG_INFO("%x access in kernel space and target not in supervisor",
115 address);
116 return ERROR_OK;
117 }
118 if ((address < os_border) &&
119 (cortex_a8->curr_mode == ARM_MODE_SVC)){
120 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
121 cortex_a8->curr_mode = ARM_MODE_ANY;
122 }
123 return ERROR_OK;
124 }
/* Modify the working copy of cp15_control_reg (SCTLR) in order to enable
 * or disable the MMU, for:
 * - virt2phys address conversion
 * - read or write memory in phys or virt address
 *
 * Only cp15_control_reg_curr is changed and written to the core; the value
 * saved at debug entry stays intact so it can be restored at resume.
 * Returns ERROR_OK or an error from the MCR transport.
 */
static int cortex_a8_mmu_modify(struct target *target, int enable)
{
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval = ERROR_OK;
	if (enable)
	{
		/* if mmu enabled at target stop and mmu not enable */
		if (!(cortex_a8->cp15_control_reg & 0x1U))
		{
			LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
			return ERROR_FAIL;
		}
		if (!(cortex_a8->cp15_control_reg_curr & 0x1U))
		{
			/* SCTLR bit 0 (M): turn the MMU back on */
			cortex_a8->cp15_control_reg_curr |= 0x1U;
			retval = armv7a->armv4_5_common.mcr(target, 15,
					0, 0, /* op1, op2 */
					1, 0, /* CRn, CRm */
					cortex_a8->cp15_control_reg_curr);
		}
	}
	else
	{
		if (cortex_a8->cp15_control_reg_curr & 0x4U)
		{
			/* data cache is active: clear SCTLR bit 2 (C) ... */
			cortex_a8->cp15_control_reg_curr &= ~0x4U;
			/* ... and flush the data cache first (armv7 function to be called) */
			if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache)
				armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache(target);
		}
		if ( (cortex_a8->cp15_control_reg_curr & 0x1U))
		{
			/* SCTLR bit 0 (M): switch the MMU off */
			cortex_a8->cp15_control_reg_curr &= ~0x1U;
			retval = armv7a->armv4_5_common.mcr(target, 15,
					0, 0, /* op1, op2 */
					1, 0, /* CRn, CRm */
					cortex_a8->cp15_control_reg_curr);
		}
	}
	return retval;
}
171
/*
 * Cortex-A8 Basic debug access, very low level, assumes state is saved.
 *
 * Unlocks the debug register file, clears the sticky power-down status,
 * then polls the target once so target->state is current.
 */
static int cortex_a8_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* Unlocking the debug registers for modification:
	 * 0xC5ACCE55 is the CoreSight key for the Lock Access Register. */
	/* The debugport might be uninitialised so try twice */
	retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
	if (retval != ERROR_OK)
	{
		/* try again */
		retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
		if (retval == ERROR_OK)
		{
			LOG_USER("Locking debug access failed on first, but succeeded on second try.");
		}
	}
	if (retval != ERROR_OK)
		return retval;
	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain (reading PRSR clears it) */
	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a8_poll(target);
}
214
/* Execute one ARM opcode on the halted core through the ITR.
 *
 * To reduce needless round-trips, pass in a pointer to the current
 * DSCR value. Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_INSTR_COMP if you
 * happen to know that no instruction is pending.
 *
 * Waits (1 s timeout) for any previous instruction to complete, queues
 * @opcode, then waits again for completion.  On return *dscr_p (if
 * non-NULL) holds the latest DSCR with INSTR_COMP set.
 */
static int cortex_a8_exec_opcode(struct target *target,
		uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;

	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	long long then = timeval_ms();
	while ((dscr & DSCR_INSTR_COMP) == 0)
	{
		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
		{
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
			return retval;
		}
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
			return ERROR_FAIL;
		}
	}

	/* Writing ITR makes the core execute the opcode */
	retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	then = timeval_ms();
	do
	{
		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
		{
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
			return ERROR_FAIL;
		}
	}
	while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
278
/**************************************************************************
Read core registers with very few exec_opcode calls, fast but needs a
work area at @address.  This can cause problems with MMU active.

regfile[0] receives r0 (read via the DCC); the core then executes
STMIA r0, {r1-r15} (register mask 0xFFFE) so r1-r15 land in the work
area, which is read back into regfile[1..15] in one burst.
**************************************************************************/
static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
		uint32_t * regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;

	retval = cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	/* point r0 at the work area (it is the STMIA base register) */
	retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;

	/* 15 words: r1-r15 */
	retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
			(uint8_t *)(&regfile[1]), 4*15, address);

	return retval;
}
305
/* Read one core register through the DCC.
 *
 * regnum 0-14: Rn, moved to the DCC directly.
 * regnum 15:   PC, staged through r0 (clobbers r0).
 * regnum 16/17: CPSR/SPSR via MRS, staged through r0 (clobbers r0).
 * regnum > 17 silently returns ERROR_OK without reading anything.
 */
static int cortex_a8_dap_read_coreregister_u32(struct target *target,
		uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;

	if (reg > 17)
		return retval;

	if (reg < 15)
	{
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
		retval = cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}
	else if (reg == 15)
	{
		/* "MOV r0, r15"; then move r0 to DCCTX */
		retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}
	else
	{
		/* "MRS r0, CPSR" or "MRS r0, SPSR" (reg & 1 selects SPSR)
		 * then move r0 to DCCTX
		 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX (1 s timeout) */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0)
	{
		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
375
/* Write one core register through the DCC.
 *
 * regnum 0-14: Rn, loaded from the DCC directly.
 * regnum 15:   PC, staged through r0 (clobbers r0).
 * regnum 16/17: CPSR/SPSR via MSR, staged through r0 (clobbers r0);
 *               writing CPSR is followed by a prefetch flush (ISB).
 * regnum > 17 returns after clearing/filling the DCC without writing
 * a register.
 */
static int cortex_a8_dap_write_coreregister_u32(struct target *target,
		uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL)
	{
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15)
	{
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	}
	else if (Rd == 15)
	{
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	}
	else
	{
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16)
		{
			retval = cortex_a8_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
461
462 /* Write to memory mapped registers directly with no cache or mmu handling */
463 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
464 {
465 int retval;
466 struct armv7a_common *armv7a = target_to_armv7a(target);
467 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
468
469 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap, address, value);
470
471 return retval;
472 }
473
474 /*
475 * Cortex-A8 implementation of Debug Programmer's Model
476 *
477 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
478 * so there's no need to poll for it before executing an instruction.
479 *
480 * NOTE that in several of these cases the "stall" mode might be useful.
481 * It'd let us queue a few operations together... prepare/finish might
482 * be the places to enable/disable that mode.
483 */
484
/* Map a generic arm_dpm pointer back to its enclosing cortex_a8_common. */
static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
}
489
490 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
491 {
492 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
493 return mem_ap_sel_write_u32(a8->armv7a_common.armv4_5_common.dap,
494 swjdp_debugap,a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
495 }
496
/* Pull one word from the core's DCC transmit register (DTRTX).
 *
 * Waits (1 s timeout) for DSCR.DTRTXfull, then reads the word into
 * *data.  *dscr_p, if non-NULL, supplies the starting DSCR and receives
 * the last DSCR value read.
 */
static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
		uint32_t *dscr_p)
{
	struct adiv5_dap *swjdp = a8->armv7a_common.armv4_5_common.dap;
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				a8->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
533
/* DPM "prepare" hook: establish the invariant that DSCR.INSTR_COMP is
 * set (no instruction pending) before any DPM operation, and drain a
 * stale word out of DTRRX if one is unexpectedly present.
 */
static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	struct adiv5_dap *swjdp = a8->armv7a_common.armv4_5_common.dap;
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after ever DPM operation */
	long long then = timeval_ms();
	for (;;)
	{
		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				a8->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by executing MRC p14, 0, r0, c0, c5, 0 */
		retval = cortex_a8_exec_opcode(
				a8->armv7a_common.armv4_5_common.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
573
574 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
575 {
576 /* REVISIT what could be done here? */
577 return ERROR_OK;
578 }
579
580 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
581 uint32_t opcode, uint32_t data)
582 {
583 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
584 int retval;
585 uint32_t dscr = DSCR_INSTR_COMP;
586
587 retval = cortex_a8_write_dcc(a8, data);
588 if (retval != ERROR_OK)
589 return retval;
590
591 return cortex_a8_exec_opcode(
592 a8->armv7a_common.armv4_5_common.target,
593 opcode,
594 &dscr);
595 }
596
/* Load one word into r0 via the DCC, then execute @opcode, which is
 * expected to take its data from r0.  Clobbers r0. */
static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
		uint32_t opcode, uint32_t data)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a8_write_dcc(a8, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
	 * (the old comment said MCR/0xEE000E15; the code executes MRC) */
	retval = cortex_a8_exec_opcode(
			a8->armv7a_common.armv4_5_common.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a8_exec_opcode(
			a8->armv7a_common.armv4_5_common.target,
			opcode,
			&dscr);

	return retval;
}
624
625 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
626 {
627 struct target *target = dpm->arm->target;
628 uint32_t dscr = DSCR_INSTR_COMP;
629
630 /* "Prefetch flush" after modifying execution status in CPSR */
631 return cortex_a8_exec_opcode(target,
632 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
633 &dscr);
634 }
635
636 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
637 uint32_t opcode, uint32_t *data)
638 {
639 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
640 int retval;
641 uint32_t dscr = DSCR_INSTR_COMP;
642
643 /* the opcode, writing data to DCC */
644 retval = cortex_a8_exec_opcode(
645 a8->armv7a_common.armv4_5_common.target,
646 opcode,
647 &dscr);
648 if (retval != ERROR_OK)
649 return retval;
650
651 return cortex_a8_read_dcc(a8, data, &dscr);
652 }
653
654
/* Execute @opcode, which is expected to leave its result in r0, then
 * move r0 into the DCC and read it back into *data.  Clobbers r0. */
static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
		uint32_t opcode, uint32_t *data)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* the opcode, writing data to R0 */
	retval = cortex_a8_exec_opcode(
			a8->armv7a_common.armv4_5_common.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC: "MCR p14, 0, R0, c0, c5, 0" */
	retval = cortex_a8_exec_opcode(
			a8->armv7a_common.armv4_5_common.target,
			ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a8_read_dcc(a8, data, &dscr);
}
680
/* Program one breakpoint (index 0-15) or watchpoint (index 16-31)
 * comparator: writes @addr to the value register and @control to the
 * control register.  Returns ERROR_FAIL for an out-of-range index.
 */
static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
		uint32_t addr, uint32_t control)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t vr = a8->armv7a_common.debug_base;
	uint32_t cr = a8->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
	case 0 ... 15:		/* breakpoints */
		vr += CPUDBG_BVR_BASE;
		cr += CPUDBG_BCR_BASE;
		break;
	case 16 ... 31:		/* watchpoints */
		vr += CPUDBG_WVR_BASE;
		cr += CPUDBG_WCR_BASE;
		index_t -= 16;
		break;
	default:
		return ERROR_FAIL;
	}
	/* registers are word-indexed within each bank */
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
			(unsigned) vr, (unsigned) cr);

	retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
716
/* Disable one breakpoint (index 0-15) or watchpoint (index 16-31)
 * comparator by zeroing its control register.  Returns ERROR_FAIL for
 * an out-of-range index.
 */
static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t cr;

	switch (index_t) {
	case 0 ... 15:
		cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
		break;
	case 16 ... 31:
		cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
		index_t -= 16;
		break;
	default:
		return ERROR_FAIL;
	}
	cr += 4 * index_t;

	LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);

	/* clear control register */
	return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
}
740
/* Wire up the Cortex-A8 implementation of the Debug Programmer's Model:
 * install all DPM callbacks, then run the generic setup/initialize
 * sequence.  @didr is the Debug ID Register value read from the core.
 */
static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
{
	struct arm_dpm *dpm = &a8->armv7a_common.dpm;
	int retval;

	dpm->arm = &a8->armv7a_common.armv4_5_common;
	dpm->didr = didr;

	dpm->prepare = cortex_a8_dpm_prepare;
	dpm->finish = cortex_a8_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a8_bpwp_enable;
	dpm->bpwp_disable = cortex_a8_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
768 static struct target *get_cortex_a8(struct target *target, int32_t coreid)
769 {
770 struct target_list *head;
771 struct target *curr;
772
773 head = target->head;
774 while(head != (struct target_list*)NULL)
775 {
776 curr = head->target;
777 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
778 {
779 return curr;
780 }
781 head = head->next;
782 }
783 return target;
784 }
785 static int cortex_a8_halt(struct target *target);
786
787 static int cortex_a8_halt_smp(struct target *target)
788 {
789 int retval = 0;
790 struct target_list *head;
791 struct target *curr;
792 head = target->head;
793 while(head != (struct target_list*)NULL)
794 {
795 curr = head->target;
796 if ((curr != target) && (curr->state!= TARGET_HALTED))
797 {
798 retval += cortex_a8_halt(curr);
799 }
800 head = head->next;
801 }
802 return retval;
803 }
804
805 static int update_halt_gdb(struct target *target)
806 {
807 int retval = 0;
808 if (target->gdb_service->core[0]==-1)
809 {
810 target->gdb_service->target = target;
811 target->gdb_service->core[0] = target->coreid;
812 retval += cortex_a8_halt_smp(target);
813 }
814 return retval;
815 }
816
817 /*
818 * Cortex-A8 Run control
819 */
820
/* Poll the core's DSCR and update target->state accordingly, firing
 * halt events and (for SMP groups) halting sibling cores on a halting
 * debug event.
 */
static int cortex_a8_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follows:
	 *   maint packet J core_id
	 *   continue
	 * the next polling triggers a halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target==NULL) )
	{
		target->gdb_service->target =
			get_cortex_a8(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target,
				TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
	{
		return retval;
	}
	cortex_a8->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED))
	{
		if (prev_target_state != TARGET_HALTED)
		{
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
					|| (prev_target_state == TARGET_RESET))
			{
				retval = cortex_a8_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp)
				{
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
						TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING)
			{
				LOG_DEBUG(" ");

				retval = cortex_a8_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp)
				{
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
						TARGET_EVENT_DEBUG_HALTED);
			}
		}
	}
	else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
	{
		target->state = TARGET_RUNNING;
	}
	else
	{
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
904
/* Request a halt via DRCR, enable halting debug mode in DSCR, and wait
 * (1 s timeout) until DSCR reports the core halted.  Sets the debug
 * reason to DBG_REASON_DBGRQ on success.
 */
static int cortex_a8_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enter halting debug mode
	 */
	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;;)
	{
		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_HALTED) != 0)
		{
			break;
		}
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
956
957 static int cortex_a8_internal_restore(struct target *target, int current,
958 uint32_t *address, int handle_breakpoints, int debug_execution)
959 {
960 struct armv7a_common *armv7a = target_to_armv7a(target);
961 struct arm *armv4_5 = &armv7a->armv4_5_common;
962 int retval;
963 uint32_t resume_pc;
964
965 if (!debug_execution)
966 target_free_all_working_areas(target);
967
968 #if 0
969 if (debug_execution)
970 {
971 /* Disable interrupts */
972 /* We disable interrupts in the PRIMASK register instead of
973 * masking with C_MASKINTS,
974 * This is probably the same issue as Cortex-M3 Errata 377493:
975 * C_MASKINTS in parallel with disabled interrupts can cause
976 * local faults to not be taken. */
977 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
978 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
979 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
980
981 /* Make sure we are in Thumb mode */
982 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
983 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
984 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
985 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
986 }
987 #endif
988
989 /* current = 1: continue on current pc, otherwise continue at <address> */
990 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
991 if (!current)
992 resume_pc = *address;
993 else
994 *address = resume_pc;
995
996 /* Make sure that the Armv7 gdb thumb fixups does not
997 * kill the return address
998 */
999 switch (armv4_5->core_state)
1000 {
1001 case ARM_STATE_ARM:
1002 resume_pc &= 0xFFFFFFFC;
1003 break;
1004 case ARM_STATE_THUMB:
1005 case ARM_STATE_THUMB_EE:
1006 /* When the return address is loaded into PC
1007 * bit 0 must be 1 to stay in Thumb state
1008 */
1009 resume_pc |= 0x1;
1010 break;
1011 case ARM_STATE_JAZELLE:
1012 LOG_ERROR("How do I resume into Jazelle state??");
1013 return ERROR_FAIL;
1014 }
1015 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
1016 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
1017 armv4_5->pc->dirty = 1;
1018 armv4_5->pc->valid = 1;
1019 /* restore dpm_mode at system halt */
1020 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1021 /* called it now before restoring context because it uses cpu
1022 * register r0 for restoring cp15 control register */
1023 retval = cortex_a8_restore_cp15_control_reg(target);
1024 retval = cortex_a8_restore_context(target, handle_breakpoints);
1025 if (retval != ERROR_OK)
1026 return retval;
1027 target->debug_reason = DBG_REASON_NOTHALTED;
1028 target->state = TARGET_RUNNING;
1029
1030 /* registers are now invalid */
1031 register_cache_invalidate(armv4_5->core_cache);
1032
1033 #if 0
1034 /* the front-end may request us not to handle breakpoints */
1035 if (handle_breakpoints)
1036 {
1037 /* Single step past breakpoint at current address */
1038 if ((breakpoint = breakpoint_find(target, resume_pc)))
1039 {
1040 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
1041 cortex_m3_unset_breakpoint(target, breakpoint);
1042 cortex_m3_single_step_core(target);
1043 cortex_m3_set_breakpoint(target, breakpoint);
1044 }
1045 }
1046
1047 #endif
1048 return retval;
1049 }
1050
/* Restart the core after cortex_a8_internal_restore(): disable ITR,
 * write DRCR restart + clear-sticky-exceptions, and wait (1 s timeout)
 * for DSCR to report the core restarted.  Marks the target running and
 * invalidates the register cache.
 */
static int cortex_a8_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct adiv5_dap *swjdp = armv4_5->dap;
	int retval;
	uint32_t dscr;
	/*
	 * Restart core and wait for it to be started. Clear ITRen and sticky
	 * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;;)
	{
		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(armv4_5->core_cache);

	return ERROR_OK;
}
1109
1110 static int cortex_a8_restore_smp(struct target *target,int handle_breakpoints)
1111 {
1112 int retval = 0;
1113 struct target_list *head;
1114 struct target *curr;
1115 uint32_t address;
1116 head = target->head;
1117 while(head != (struct target_list*)NULL)
1118 {
1119 curr = head->target;
1120 if ((curr != target) && (curr->state != TARGET_RUNNING))
1121 {
1122 /* resume current address , not in step mode */
1123 retval += cortex_a8_internal_restore(curr, 1, &address,
1124 handle_breakpoints, 0);
1125 retval += cortex_a8_internal_restart(curr);
1126 }
1127 head = head->next;
1128
1129 }
1130 return retval;
1131 }
1132
1133 static int cortex_a8_resume(struct target *target, int current,
1134 uint32_t address, int handle_breakpoints, int debug_execution)
1135 {
1136 int retval = 0;
1137 /* dummy resume for smp toggle in order to reduce gdb impact */
1138 if ((target->smp) && (target->gdb_service->core[1]!=-1))
1139 {
1140 /* simulate a start and halt of target */
1141 target->gdb_service->target = NULL;
1142 target->gdb_service->core[0] = target->gdb_service->core[1];
1143 /* fake resume at next poll we play the target core[1], see poll*/
1144 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1145 return 0;
1146 }
1147 cortex_a8_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1148 if (target->smp)
1149 { target->gdb_service->core[0] = -1;
1150 retval += cortex_a8_restore_smp(target, handle_breakpoints);
1151 }
1152 cortex_a8_internal_restart(target);
1153
1154 if (!debug_execution)
1155 {
1156 target->state = TARGET_RUNNING;
1157 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1158 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1159 }
1160 else
1161 {
1162 target->state = TARGET_DEBUG_RUNNING;
1163 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1164 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1165 }
1166
1167 return ERROR_OK;
1168 }
1169
/* Perform the work needed after the core has entered debug state:
 * re-enable ITR, report the halt reason (and WFAR for watchpoints),
 * read the register file into the cache, fix up the saved pc for the
 * pipeline offset, and run the architecture post_debug_entry hook. */
static int cortex_a8_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason: sets target->debug_reason from the DSCR
	 * snapshot taken at halt time */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a8->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
	{
		/* slow path (always taken today, see REVISIT above):
		 * read registers one at a time through the DPM */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	}
	else
	{
		/* fast path: have the core dump r0-r15 to a working area,
		 * then fetch the dump in one burst */
		retval = cortex_a8_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
		{
			return retval;
		}

		/* read Current PSR */
		retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
		/* store current cpsr */
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(armv4_5, cpsr);

		/* update cache */
		for (i = 0; i <= ARM_PC; i++)
		{
			reg = arm_reg_current(armv4_5, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: the dumped pc is ahead of the
		 * halted instruction by the pipeline offset */
		if (cpsr & (1 << 5))
		{
			// T bit set for Thumb or ThumbEE state
			regfile[ARM_PC] -= 4;
		}
		else
		{
			// ARM state
			regfile[ARM_PC] -= 8;
		}

		reg = armv4_5->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
//	armv4_5->exception_number = 0;
	if (armv7a->post_debug_entry)
	{
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1300
1301 static int cortex_a8_post_debug_entry(struct target *target)
1302 {
1303 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1304 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1305 int retval;
1306
1307 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1308 retval = armv7a->armv4_5_common.mrc(target, 15,
1309 0, 0, /* op1, op2 */
1310 1, 0, /* CRn, CRm */
1311 &cortex_a8->cp15_control_reg);
1312 if (retval != ERROR_OK)
1313 return retval;
1314 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
1315 cortex_a8->cp15_control_reg_curr = cortex_a8->cp15_control_reg;
1316
1317 if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1)
1318 {
1319 armv7a_identify_cache(target);
1320 }
1321
1322 armv7a->armv7a_mmu.mmu_enabled =
1323 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
1324 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1325 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
1326 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1327 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
1328 cortex_a8->curr_mode = armv7a->armv4_5_common.core_mode;
1329
1330 return ERROR_OK;
1331 }
1332
1333 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1334 int handle_breakpoints)
1335 {
1336 struct armv7a_common *armv7a = target_to_armv7a(target);
1337 struct arm *armv4_5 = &armv7a->armv4_5_common;
1338 struct breakpoint *breakpoint = NULL;
1339 struct breakpoint stepbreakpoint;
1340 struct reg *r;
1341 int retval;
1342
1343 if (target->state != TARGET_HALTED)
1344 {
1345 LOG_WARNING("target not halted");
1346 return ERROR_TARGET_NOT_HALTED;
1347 }
1348
1349 /* current = 1: continue on current pc, otherwise continue at <address> */
1350 r = armv4_5->pc;
1351 if (!current)
1352 {
1353 buf_set_u32(r->value, 0, 32, address);
1354 }
1355 else
1356 {
1357 address = buf_get_u32(r->value, 0, 32);
1358 }
1359
1360 /* The front-end may request us not to handle breakpoints.
1361 * But since Cortex-A8 uses breakpoint for single step,
1362 * we MUST handle breakpoints.
1363 */
1364 handle_breakpoints = 1;
1365 if (handle_breakpoints) {
1366 breakpoint = breakpoint_find(target, address);
1367 if (breakpoint)
1368 cortex_a8_unset_breakpoint(target, breakpoint);
1369 }
1370
1371 /* Setup single step breakpoint */
1372 stepbreakpoint.address = address;
1373 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1374 ? 2 : 4;
1375 stepbreakpoint.type = BKPT_HARD;
1376 stepbreakpoint.set = 0;
1377
1378 /* Break on IVA mismatch */
1379 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1380
1381 target->debug_reason = DBG_REASON_SINGLESTEP;
1382
1383 retval = cortex_a8_resume(target, 1, address, 0, 0);
1384 if (retval != ERROR_OK)
1385 return retval;
1386
1387 long long then = timeval_ms();
1388 while (target->state != TARGET_HALTED)
1389 {
1390 retval = cortex_a8_poll(target);
1391 if (retval != ERROR_OK)
1392 return retval;
1393 if (timeval_ms() > then + 1000)
1394 {
1395 LOG_ERROR("timeout waiting for target halt");
1396 return ERROR_FAIL;
1397 }
1398 }
1399
1400 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1401
1402 target->debug_reason = DBG_REASON_BREAKPOINT;
1403
1404 if (breakpoint)
1405 cortex_a8_set_breakpoint(target, breakpoint, 0);
1406
1407 if (target->state != TARGET_HALTED)
1408 LOG_DEBUG("target stepped");
1409
1410 return ERROR_OK;
1411 }
1412
1413 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1414 {
1415 struct armv7a_common *armv7a = target_to_armv7a(target);
1416
1417 LOG_DEBUG(" ");
1418
1419 if (armv7a->pre_restore_context)
1420 armv7a->pre_restore_context(target);
1421
1422 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1423 }
1424
1425
1426 /*
1427 * Cortex-A8 Breakpoint and watchpoint functions
1428 */
1429
1430 /* Setup hardware Breakpoint Register Pair */
1431 static int cortex_a8_set_breakpoint(struct target *target,
1432 struct breakpoint *breakpoint, uint8_t matchmode)
1433 {
1434 int retval;
1435 int brp_i=0;
1436 uint32_t control;
1437 uint8_t byte_addr_select = 0x0F;
1438 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1439 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1440 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1441
1442 if (breakpoint->set)
1443 {
1444 LOG_WARNING("breakpoint already set");
1445 return ERROR_OK;
1446 }
1447
1448 if (breakpoint->type == BKPT_HARD)
1449 {
1450 while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
1451 brp_i++ ;
1452 if (brp_i >= cortex_a8->brp_num)
1453 {
1454 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1455 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1456 }
1457 breakpoint->set = brp_i + 1;
1458 if (breakpoint->length == 2)
1459 {
1460 byte_addr_select = (3 << (breakpoint->address & 0x02));
1461 }
1462 control = ((matchmode & 0x7) << 20)
1463 | (byte_addr_select << 5)
1464 | (3 << 1) | 1;
1465 brp_list[brp_i].used = 1;
1466 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1467 brp_list[brp_i].control = control;
1468 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1469 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1470 brp_list[brp_i].value);
1471 if (retval != ERROR_OK)
1472 return retval;
1473 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1474 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1475 brp_list[brp_i].control);
1476 if (retval != ERROR_OK)
1477 return retval;
1478 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1479 brp_list[brp_i].control,
1480 brp_list[brp_i].value);
1481 }
1482 else if (breakpoint->type == BKPT_SOFT)
1483 {
1484 uint8_t code[4];
1485 if (breakpoint->length == 2)
1486 {
1487 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1488 }
1489 else
1490 {
1491 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1492 }
1493 retval = target->type->read_memory(target,
1494 breakpoint->address & 0xFFFFFFFE,
1495 breakpoint->length, 1,
1496 breakpoint->orig_instr);
1497 if (retval != ERROR_OK)
1498 return retval;
1499 retval = target->type->write_memory(target,
1500 breakpoint->address & 0xFFFFFFFE,
1501 breakpoint->length, 1, code);
1502 if (retval != ERROR_OK)
1503 return retval;
1504 breakpoint->set = 0x11; /* Any nice value but 0 */
1505 }
1506
1507 return ERROR_OK;
1508 }
1509
1510 static int cortex_a8_set_context_breakpoint(struct target *target,
1511 struct breakpoint *breakpoint, uint8_t matchmode)
1512 {
1513 int retval = ERROR_FAIL;
1514 int brp_i=0;
1515 uint32_t control;
1516 uint8_t byte_addr_select = 0x0F;
1517 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1518 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1519 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1520
1521 if (breakpoint->set)
1522 {
1523 LOG_WARNING("breakpoint already set");
1524 return retval ;
1525 }
1526 /*check available context BRPs*/
1527 while ((brp_list[brp_i].used || (brp_list[brp_i].type!=BRP_CONTEXT)) && (brp_i < cortex_a8->brp_num))
1528 brp_i++ ;
1529
1530 if (brp_i >= cortex_a8->brp_num)
1531 {
1532 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1533 return ERROR_FAIL;
1534 }
1535
1536 breakpoint->set = brp_i + 1;
1537 control = ((matchmode & 0x7) << 20)
1538 | (byte_addr_select << 5)
1539 | (3 << 1) | 1;
1540 brp_list[brp_i].used = 1;
1541 brp_list[brp_i].value = (breakpoint->asid);
1542 brp_list[brp_i].control = control;
1543 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1544 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1545 brp_list[brp_i].value);
1546 if(retval != ERROR_OK)
1547 return retval;
1548 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1549 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1550 brp_list[brp_i].control);
1551 if(retval != ERROR_OK)
1552 return retval;
1553 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1554 brp_list[brp_i].control,
1555 brp_list[brp_i].value);
1556 return ERROR_OK;
1557
1558 }
1559
1560 static int cortex_a8_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1561 {
1562 int retval = ERROR_FAIL;
1563 int brp_1=0; //holds the contextID pair
1564 int brp_2=0; // holds the IVA pair
1565 uint32_t control_CTX, control_IVA;
1566 uint8_t CTX_byte_addr_select = 0x0F;
1567 uint8_t IVA_byte_addr_select = 0x0F;
1568 uint8_t CTX_machmode = 0x03;
1569 uint8_t IVA_machmode = 0x01;
1570 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1571 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1572 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1573
1574
1575
1576 if (breakpoint->set)
1577 {
1578 LOG_WARNING("breakpoint already set");
1579 return retval ;
1580 }
1581 /*check available context BRPs*/
1582 while ((brp_list[brp_1].used || (brp_list[brp_1].type!=BRP_CONTEXT)) && (brp_1 < cortex_a8->brp_num))
1583 brp_1++ ;
1584
1585 printf("brp(CTX) found num: %d \n",brp_1);
1586 if (brp_1 >= cortex_a8->brp_num)
1587 {
1588 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1589 return ERROR_FAIL;
1590 }
1591
1592 while ((brp_list[brp_2].used || (brp_list[brp_2].type!=BRP_NORMAL)) && (brp_2 < cortex_a8->brp_num))
1593 brp_2++ ;
1594
1595 printf("brp(IVA) found num: %d \n",brp_2);
1596 if (brp_2 >= cortex_a8->brp_num)
1597 {
1598 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1599 return ERROR_FAIL;
1600 }
1601
1602 breakpoint->set = brp_1 + 1;
1603 breakpoint->linked_BRP= brp_2;
1604 control_CTX = ((CTX_machmode & 0x7) << 20)
1605 | (brp_2 << 16)
1606 | (0 << 14)
1607 | (CTX_byte_addr_select << 5)
1608 | (3 << 1) | 1;
1609 brp_list[brp_1].used = 1;
1610 brp_list[brp_1].value = (breakpoint->asid);
1611 brp_list[brp_1].control = control_CTX;
1612 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1613 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1614 brp_list[brp_1].value);
1615 if (retval != ERROR_OK)
1616 return retval;
1617 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1618 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1619 brp_list[brp_1].control);
1620 if( retval != ERROR_OK )
1621 return retval;
1622
1623 control_IVA = ((IVA_machmode & 0x7) << 20)
1624 | (brp_1 << 16)
1625 | (IVA_byte_addr_select << 5)
1626 | (3 << 1) | 1;
1627 brp_list[brp_2].used = 1;
1628 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1629 brp_list[brp_2].control = control_IVA;
1630 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1631 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1632 brp_list[brp_2].value);
1633 if (retval != ERROR_OK)
1634 return retval;
1635 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1636 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1637 brp_list[brp_2].control);
1638 if (retval != ERROR_OK )
1639 return retval;
1640
1641 return ERROR_OK;
1642 }
1643
1644
1645 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1646 {
1647 int retval;
1648 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1649 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1650 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1651
1652 if (!breakpoint->set)
1653 {
1654 LOG_WARNING("breakpoint not set");
1655 return ERROR_OK;
1656 }
1657
1658 if (breakpoint->type == BKPT_HARD)
1659 {
1660 if ((breakpoint->address != 0) && (breakpoint->asid != 0))
1661 {
1662 int brp_i = breakpoint->set - 1;
1663 int brp_j = breakpoint->linked_BRP;
1664 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1665 {
1666 LOG_DEBUG("Invalid BRP number in breakpoint");
1667 return ERROR_OK;
1668 }
1669 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1670 brp_list[brp_i].control, brp_list[brp_i].value);
1671 brp_list[brp_i].used = 0;
1672 brp_list[brp_i].value = 0;
1673 brp_list[brp_i].control = 0;
1674 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1675 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1676 brp_list[brp_i].control);
1677 if (retval != ERROR_OK)
1678 return retval;
1679 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1680 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1681 brp_list[brp_i].value);
1682 if (retval != ERROR_OK)
1683 return retval;
1684 if ((brp_j < 0) || (brp_j >= cortex_a8->brp_num))
1685 {
1686 LOG_DEBUG("Invalid BRP number in breakpoint");
1687 return ERROR_OK;
1688 }
1689 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1690 brp_list[brp_j].control, brp_list[brp_j].value);
1691 brp_list[brp_j].used = 0;
1692 brp_list[brp_j].value = 0;
1693 brp_list[brp_j].control = 0;
1694 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1695 + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
1696 brp_list[brp_j].control);
1697 if (retval != ERROR_OK)
1698 return retval;
1699 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1700 + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
1701 brp_list[brp_j].value);
1702 if (retval != ERROR_OK)
1703 return retval;
1704 breakpoint->linked_BRP = 0;
1705 breakpoint->set = 0;
1706 return ERROR_OK;
1707
1708 }
1709 else
1710 {
1711 int brp_i = breakpoint->set - 1;
1712 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1713 {
1714 LOG_DEBUG("Invalid BRP number in breakpoint");
1715 return ERROR_OK;
1716 }
1717 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1718 brp_list[brp_i].control, brp_list[brp_i].value);
1719 brp_list[brp_i].used = 0;
1720 brp_list[brp_i].value = 0;
1721 brp_list[brp_i].control = 0;
1722 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1723 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1724 brp_list[brp_i].control);
1725 if (retval != ERROR_OK)
1726 return retval;
1727 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1728 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1729 brp_list[brp_i].value);
1730 if (retval != ERROR_OK)
1731 return retval;
1732 breakpoint->set = 0;
1733 return ERROR_OK;
1734 }
1735 }
1736 else
1737 {
1738 /* restore original instruction (kept in target endianness) */
1739 if (breakpoint->length == 4)
1740 {
1741 retval = target->type->write_memory(target,
1742 breakpoint->address & 0xFFFFFFFE,
1743 4, 1, breakpoint->orig_instr);
1744 if (retval != ERROR_OK)
1745 return retval;
1746 }
1747 else
1748 {
1749 retval = target->type->write_memory(target,
1750 breakpoint->address & 0xFFFFFFFE,
1751 2, 1, breakpoint->orig_instr);
1752 if (retval != ERROR_OK)
1753 return retval;
1754 }
1755 }
1756 breakpoint->set = 0;
1757
1758 return ERROR_OK;
1759 }
1760
1761 static int cortex_a8_add_breakpoint(struct target *target,
1762 struct breakpoint *breakpoint)
1763 {
1764 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1765
1766 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1767 {
1768 LOG_INFO("no hardware breakpoint available");
1769 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1770 }
1771
1772 if (breakpoint->type == BKPT_HARD)
1773 cortex_a8->brp_num_available--;
1774
1775 return cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1776 }
1777
1778 static int cortex_a8_add_context_breakpoint(struct target *target,
1779 struct breakpoint *breakpoint)
1780 {
1781 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1782
1783 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1784 {
1785 LOG_INFO("no hardware breakpoint available");
1786 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1787 }
1788
1789 if (breakpoint->type == BKPT_HARD)
1790 cortex_a8->brp_num_available--;
1791
1792 return cortex_a8_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1793 }
1794
1795 static int cortex_a8_add_hybrid_breakpoint(struct target *target,
1796 struct breakpoint *breakpoint)
1797 {
1798 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1799
1800 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1801 {
1802 LOG_INFO("no hardware breakpoint available");
1803 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1804 }
1805
1806 if (breakpoint->type == BKPT_HARD)
1807 cortex_a8->brp_num_available--;
1808
1809 return cortex_a8_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1810 }
1811
1812
1813 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1814 {
1815 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1816
1817 #if 0
1818 /* It is perfectly possible to remove breakpoints while the target is running */
1819 if (target->state != TARGET_HALTED)
1820 {
1821 LOG_WARNING("target not halted");
1822 return ERROR_TARGET_NOT_HALTED;
1823 }
1824 #endif
1825
1826 if (breakpoint->set)
1827 {
1828 cortex_a8_unset_breakpoint(target, breakpoint);
1829 if (breakpoint->type == BKPT_HARD)
1830 cortex_a8->brp_num_available++ ;
1831 }
1832
1833
1834 return ERROR_OK;
1835 }
1836
1837
1838
1839 /*
1840 * Cortex-A8 Reset functions
1841 */
1842
1843 static int cortex_a8_assert_reset(struct target *target)
1844 {
1845 struct armv7a_common *armv7a = target_to_armv7a(target);
1846
1847 LOG_DEBUG(" ");
1848
1849 /* FIXME when halt is requested, make it work somehow... */
1850
1851 /* Issue some kind of warm reset. */
1852 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1853 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1854 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1855 /* REVISIT handle "pulls" cases, if there's
1856 * hardware that needs them to work.
1857 */
1858 jtag_add_reset(0, 1);
1859 } else {
1860 LOG_ERROR("%s: how to reset?", target_name(target));
1861 return ERROR_FAIL;
1862 }
1863
1864 /* registers are now invalid */
1865 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1866
1867 target->state = TARGET_RESET;
1868
1869 return ERROR_OK;
1870 }
1871
1872 static int cortex_a8_deassert_reset(struct target *target)
1873 {
1874 int retval;
1875
1876 LOG_DEBUG(" ");
1877
1878 /* be certain SRST is off */
1879 jtag_add_reset(0, 0);
1880
1881 retval = cortex_a8_poll(target);
1882 if (retval != ERROR_OK)
1883 return retval;
1884
1885 if (target->reset_halt) {
1886 if (target->state != TARGET_HALTED) {
1887 LOG_WARNING("%s: ran after reset and before halt ...",
1888 target_name(target));
1889 if ((retval = target_halt(target)) != ERROR_OK)
1890 return retval;
1891 }
1892 }
1893
1894 return ERROR_OK;
1895 }
1896
1897
/* Write memory by executing instructions on the halted core through
 * the APB-AP / ITR path: r0 is loaded with the word-aligned start
 * address, then for each word r1 is loaded with the data and
 * "STR r1, [r0], #4" is executed.  Partial words at the head/tail are
 * handled by reading the existing word first and merging the new
 * bytes (read-modify-write).  r0 and r1 are marked dirty so their
 * cached values get written back on resume.
 *
 * NOTE(review): the byte-lane union assumes host byte order matches
 * the target's memory byte order (little-endian) — confirm behavior
 * for big-endian configurations.
 */
static int cortex_a8_write_apb_ab_memory(struct target *target,
		uint32_t address, uint32_t size,
		uint32_t count, const uint8_t *buffer)
{

	/* write memory through APB-AP */

	int retval = ERROR_INVALID_ARGUMENTS;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	int total_bytes = count * size;
	int start_byte, nbytes_to_write, i;
	struct reg *reg;
	union _data {
		uint8_t uc_a[4];
		uint32_t ui;
	} data;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* r0 and r1 are clobbered below; mark them for restore-on-resume */
	reg = arm_reg_current(armv4_5, 0);
	reg->dirty = 1;
	reg = arm_reg_current(armv4_5, 1);
	reg->dirty = 1;

	/* r0 = word-aligned start address */
	retval = cortex_a8_dap_write_coreregister_u32(target, address & 0xFFFFFFFC, 0);
	if (retval != ERROR_OK)
		return retval;

	/* byte offset of the first byte within its word */
	start_byte = address & 0x3;

	while (total_bytes > 0) {

		nbytes_to_write = 4 - start_byte;
		if (total_bytes < nbytes_to_write)
			nbytes_to_write = total_bytes;

		if ( nbytes_to_write != 4 ) {
			/* partial word: fetch the current contents so the
			 * untouched bytes are preserved */

			/* execute instruction LDR r1, [r0] */
			retval = cortex_a8_exec_opcode(target, ARMV4_5_LDR(1, 0), NULL);
			if (retval != ERROR_OK)
				return retval;

			retval = cortex_a8_dap_read_coreregister_u32(target, &data.ui, 1);
			if (retval != ERROR_OK)
				return retval;
		}

		/* merge the new bytes into the word */
		for (i = 0; i < nbytes_to_write; ++i)
			data.uc_a[i + start_byte] = *buffer++;

		retval = cortex_a8_dap_write_coreregister_u32(target, data.ui, 1);
		if (retval != ERROR_OK)
			return retval;

		/* execute instruction STRW r1, [r0], 1 (0xe4801004) */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_STRW_IP(1, 0) , NULL);
		if (retval != ERROR_OK)
			return retval;

		total_bytes -= nbytes_to_write;
		start_byte = 0;
	}

	return retval;
}
1969
1970
/* Read memory by executing instructions on the halted core through
 * the APB-AP / ITR path: r0 is loaded with the word-aligned start
 * address, then "LDR r1, [r0], #4" is executed per word and r1 is
 * read back through the debug port.  Unaligned head/tail bytes are
 * extracted from the fetched word.  r0 and r1 are marked dirty so
 * their cached values get written back on resume.
 *
 * NOTE(review): the byte-lane union assumes host byte order matches
 * the target's memory byte order (little-endian) — confirm behavior
 * for big-endian configurations.
 */
static int cortex_a8_read_apb_ab_memory(struct target *target,
		uint32_t address, uint32_t size,
		uint32_t count, uint8_t *buffer)
{

	/* read memory through APB-AP */

	int retval = ERROR_INVALID_ARGUMENTS;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	int total_bytes = count * size;
	int start_byte, nbytes_to_read, i;
	struct reg *reg;
	union _data {
		uint8_t uc_a[4];
		uint32_t ui;
	} data;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* r0 and r1 are clobbered below; mark them for restore-on-resume */
	reg = arm_reg_current(armv4_5, 0);
	reg->dirty = 1;
	reg = arm_reg_current(armv4_5, 1);
	reg->dirty = 1;

	/* r0 = word-aligned start address */
	retval = cortex_a8_dap_write_coreregister_u32(target, address & 0xFFFFFFFC, 0);
	if (retval != ERROR_OK)
		return retval;

	/* byte offset of the first byte within its word */
	start_byte = address & 0x3;

	while (total_bytes > 0) {

		/* execute instruction LDRW r1, [r0], 4 (0xe4901004) */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_LDRW_IP(1, 0), NULL);
		if (retval != ERROR_OK)
			return retval;

		retval = cortex_a8_dap_read_coreregister_u32(target, &data.ui, 1);
		if (retval != ERROR_OK)
			return retval;

		nbytes_to_read = 4 - start_byte;
		if (total_bytes < nbytes_to_read)
			nbytes_to_read = total_bytes;

		/* copy only the bytes the caller asked for */
		for (i = 0; i < nbytes_to_read; ++i)
			*buffer++ = data.uc_a[i + start_byte];

		total_bytes -= nbytes_to_read;
		start_byte = 0;
	}

	return retval;
}
2030
2031
2032
2033 /*
2034 * Cortex-A8 Memory access
2035 *
2036 * This is same Cortex M3 but we must also use the correct
2037 * ap number for every access.
2038 */
2039
2040 static int cortex_a8_read_phys_memory(struct target *target,
2041 uint32_t address, uint32_t size,
2042 uint32_t count, uint8_t *buffer)
2043 {
2044 struct armv7a_common *armv7a = target_to_armv7a(target);
2045 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
2046 int retval = ERROR_INVALID_ARGUMENTS;
2047 uint8_t apsel = swjdp->apsel;
2048 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d",
2049 address, size, count);
2050
2051 if (count && buffer) {
2052
2053 if ( apsel == swjdp_memoryap ) {
2054
2055 /* read memory through AHB-AP */
2056
2057 switch (size) {
2058 case 4:
2059 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
2060 buffer, 4 * count, address);
2061 break;
2062 case 2:
2063 retval = mem_ap_sel_read_buf_u16(swjdp, swjdp_memoryap,
2064 buffer, 2 * count, address);
2065 break;
2066 case 1:
2067 retval = mem_ap_sel_read_buf_u8(swjdp, swjdp_memoryap,
2068 buffer, count, address);
2069 break;
2070 }
2071 } else {
2072
2073 /* read memory through APB-AP */
2074 /* disable mmu */
2075 retval = cortex_a8_mmu_modify(target, 0);
2076 if (retval != ERROR_OK) return retval;
2077 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
2078 }
2079 }
2080 return retval;
2081 }
2082
2083 static int cortex_a8_read_memory(struct target *target, uint32_t address,
2084 uint32_t size, uint32_t count, uint8_t *buffer)
2085 {
2086 int enabled = 0;
2087 uint32_t virt, phys;
2088 int retval;
2089 struct armv7a_common *armv7a = target_to_armv7a(target);
2090 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
2091 uint8_t apsel = swjdp->apsel;
2092
2093 /* cortex_a8 handles unaligned memory access */
2094 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
2095 size, count);
2096 if (apsel == swjdp_memoryap) {
2097 retval = cortex_a8_mmu(target, &enabled);
2098 if (retval != ERROR_OK)
2099 return retval;
2100
2101
2102 if(enabled)
2103 {
2104 virt = address;
2105 retval = cortex_a8_virt2phys(target, virt, &phys);
2106 if (retval != ERROR_OK)
2107 return retval;
2108
2109 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x",
2110 virt, phys);
2111 address = phys;
2112 }
2113 retval = cortex_a8_read_phys_memory(target, address, size, count, buffer);
2114 } else {
2115 retval = cortex_a8_check_address(target, address);
2116 if (retval != ERROR_OK) return retval;
2117 /* enable mmu */
2118 retval = cortex_a8_mmu_modify(target, 1);
2119 if (retval != ERROR_OK) return retval;
2120 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
2121 }
2122 return retval;
2123 }
2124
/* Write 'count' items of 'size' bytes to physical address 'address'.
 *
 * AHB-AP path: write directly through the AHB-AP, then (if the target is
 * halted and the write succeeded) invalidate any enabled I-/D-cache lines
 * covering the range — presumably because the AHB write bypasses the
 * core's caches (TODO confirm against the Cortex-A8 TRM).
 * APB-AP path: force the MMU off so 'address' is physical and write
 * through the core; note this path returns early and performs NO cache
 * maintenance here.
 *
 * Returns ERROR_INVALID_ARGUMENTS for a zero count, NULL buffer, or an
 * AHB access with a size other than 1, 2 or 4.
 */
static int cortex_a8_write_phys_memory(struct target *target,
                uint32_t address, uint32_t size,
                uint32_t count, const uint8_t *buffer)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
	int retval = ERROR_INVALID_ARGUMENTS;
	uint8_t apsel = swjdp->apsel;

	LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address,
			size, count);

	if (count && buffer) {

		if ( apsel == swjdp_memoryap ) {

			/* write memory through AHB-AP */

			switch (size) {
			case 4:
				retval = mem_ap_sel_write_buf_u32(swjdp, swjdp_memoryap,
						buffer, 4 * count, address);
				break;
			case 2:
				retval = mem_ap_sel_write_buf_u16(swjdp, swjdp_memoryap,
						buffer, 2 * count, address);
				break;
			case 1:
				retval = mem_ap_sel_write_buf_u8(swjdp, swjdp_memoryap,
						buffer, count, address);
				break;
			}

		} else {

			/* write memory through APB-AP */
			retval = cortex_a8_mmu_modify(target, 0);
			if (retval != ERROR_OK)
				return retval;
			/* early return: skips the cache maintenance below */
			return cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
		}
	}


	/* REVISIT this op is generic ARMv7-A/R stuff */
	if (retval == ERROR_OK && target->state == TARGET_HALTED)
	{
		struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;

		retval = dpm->prepare(dpm);
		if (retval != ERROR_OK)
			return retval;

		/* The Cache handling will NOT work with MMU active, the
		 * wrong addresses will be invalidated!
		 *
		 * For both ICache and DCache, walk all cache lines in the
		 * address range. Cortex-A8 has fixed 64 byte line length.
		 *
		 * REVISIT per ARMv7, these may trigger watchpoints ...
		 */

		/* invalidate I-Cache */
		if (armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled)
		{
			/* ICIMVAU - Invalidate Cache single entry
			 * with MVA to PoU
			 *	MCR p15, 0, r0, c7, c5, 1
			 */
			for (uint32_t cacheline = address;
					cacheline < address + size * count;
					cacheline += 64) {
				/* the op is issued via the DPM: cacheline goes to r0,
				 * then the MCR executes on the core */
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
						cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* invalidate D-Cache */
		if (armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled)
		{
			/* DCIMVAC - Invalidate data Cache line
			 * with MVA to PoC
			 *	MCR p15, 0, r0, c7, c6, 1
			 */
			for (uint32_t cacheline = address;
					cacheline < address + size * count;
					cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
						cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* errors from finish() are deliberately ignored; retval already
		 * holds the write status */
		/* (void) */ dpm->finish(dpm);
	}

	return retval;
}
2228
2229 static int cortex_a8_write_memory(struct target *target, uint32_t address,
2230 uint32_t size, uint32_t count, const uint8_t *buffer)
2231 {
2232 int enabled = 0;
2233 uint32_t virt, phys;
2234 int retval;
2235 struct armv7a_common *armv7a = target_to_armv7a(target);
2236 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
2237 uint8_t apsel = swjdp->apsel;
2238 /* cortex_a8 handles unaligned memory access */
2239 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
2240 size, count);
2241 if (apsel == swjdp_memoryap) {
2242
2243 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
2244 retval = cortex_a8_mmu(target, &enabled);
2245 if (retval != ERROR_OK)
2246 return retval;
2247
2248 if(enabled)
2249 {
2250 virt = address;
2251 retval = cortex_a8_virt2phys(target, virt, &phys);
2252 if (retval != ERROR_OK)
2253 return retval;
2254 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
2255 address = phys;
2256 }
2257
2258 retval = cortex_a8_write_phys_memory(target, address, size,
2259 count, buffer);
2260 }
2261 else {
2262 retval = cortex_a8_check_address(target, address);
2263 if (retval != ERROR_OK) return retval;
2264 /* enable mmu */
2265 retval = cortex_a8_mmu_modify(target, 1);
2266 if (retval != ERROR_OK) return retval;
2267 retval = cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
2268 }
2269 return retval;
2270 }
2271
2272 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
2273 uint32_t count, const uint8_t *buffer)
2274 {
2275 return cortex_a8_write_memory(target, address, 4, count, buffer);
2276 }
2277
2278
2279 static int cortex_a8_handle_target_request(void *priv)
2280 {
2281 struct target *target = priv;
2282 struct armv7a_common *armv7a = target_to_armv7a(target);
2283 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
2284 int retval;
2285
2286 if (!target_was_examined(target))
2287 return ERROR_OK;
2288 if (!target->dbg_msg_enabled)
2289 return ERROR_OK;
2290
2291 if (target->state == TARGET_RUNNING)
2292 {
2293 uint32_t request;
2294 uint32_t dscr;
2295 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2296 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2297
2298 /* check if we have data */
2299 while ((dscr & DSCR_DTR_TX_FULL) && (retval==ERROR_OK))
2300 {
2301 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2302 armv7a->debug_base+ CPUDBG_DTRTX, &request);
2303 if (retval == ERROR_OK)
2304 {
2305 target_request(target, request);
2306 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2307 armv7a->debug_base+ CPUDBG_DSCR, &dscr);
2308 }
2309 }
2310 }
2311
2312 return ERROR_OK;
2313 }
2314
2315 /*
2316 * Cortex-A8 target information and configuration
2317 */
2318
2319 static int cortex_a8_examine_first(struct target *target)
2320 {
2321 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2322 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2323 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
2324 int i;
2325 int retval = ERROR_OK;
2326 uint32_t didr, ctypr, ttypr, cpuid;
2327
2328 /* We do one extra read to ensure DAP is configured,
2329 * we call ahbap_debugport_init(swjdp) instead
2330 */
2331 retval = ahbap_debugport_init(swjdp);
2332 if (retval != ERROR_OK)
2333 return retval;
2334
2335 if (!target->dbgbase_set)
2336 {
2337 uint32_t dbgbase;
2338 /* Get ROM Table base */
2339 uint32_t apid;
2340 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
2341 if (retval != ERROR_OK)
2342 return retval;
2343 /* Lookup 0x15 -- Processor DAP */
2344 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
2345 &armv7a->debug_base);
2346 if (retval != ERROR_OK)
2347 return retval;
2348 }
2349 else
2350 {
2351 armv7a->debug_base = target->dbgbase;
2352 }
2353
2354 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2355 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2356 if (retval != ERROR_OK)
2357 return retval;
2358
2359 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2360 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
2361 {
2362 LOG_DEBUG("Examine %s failed", "CPUID");
2363 return retval;
2364 }
2365
2366 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2367 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
2368 {
2369 LOG_DEBUG("Examine %s failed", "CTYPR");
2370 return retval;
2371 }
2372
2373 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2374 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
2375 {
2376 LOG_DEBUG("Examine %s failed", "TTYPR");
2377 return retval;
2378 }
2379
2380 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2381 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
2382 {
2383 LOG_DEBUG("Examine %s failed", "DIDR");
2384 return retval;
2385 }
2386
2387 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2388 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2389 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2390 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2391
2392 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
2393 retval = cortex_a8_dpm_setup(cortex_a8, didr);
2394 if (retval != ERROR_OK)
2395 return retval;
2396
2397 /* Setup Breakpoint Register Pairs */
2398 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
2399 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2400 cortex_a8->brp_num_available = cortex_a8->brp_num;
2401 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
2402 // cortex_a8->brb_enabled = ????;
2403 for (i = 0; i < cortex_a8->brp_num; i++)
2404 {
2405 cortex_a8->brp_list[i].used = 0;
2406 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
2407 cortex_a8->brp_list[i].type = BRP_NORMAL;
2408 else
2409 cortex_a8->brp_list[i].type = BRP_CONTEXT;
2410 cortex_a8->brp_list[i].value = 0;
2411 cortex_a8->brp_list[i].control = 0;
2412 cortex_a8->brp_list[i].BRPn = i;
2413 }
2414
2415 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
2416
2417 target_set_examined(target);
2418 return ERROR_OK;
2419 }
2420
2421 static int cortex_a8_examine(struct target *target)
2422 {
2423 int retval = ERROR_OK;
2424
2425 /* don't re-probe hardware after each reset */
2426 if (!target_was_examined(target))
2427 retval = cortex_a8_examine_first(target);
2428
2429 /* Configure core debug access */
2430 if (retval == ERROR_OK)
2431 retval = cortex_a8_init_debug_access(target);
2432
2433 return retval;
2434 }
2435
2436 /*
2437 * Cortex-A8 target creation and initialization
2438 */
2439
2440 static int cortex_a8_init_target(struct command_context *cmd_ctx,
2441 struct target *target)
2442 {
2443 /* examine_first() does a bunch of this */
2444 return ERROR_OK;
2445 }
2446
2447 static int cortex_a8_init_arch_info(struct target *target,
2448 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
2449 {
2450 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2451 struct adiv5_dap *dap = &armv7a->dap;
2452
2453 armv7a->armv4_5_common.dap = dap;
2454
2455 /* Setup struct cortex_a8_common */
2456 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
2457 /* tap has no dap initialized */
2458 if (!tap->dap)
2459 {
2460 armv7a->armv4_5_common.dap = dap;
2461 /* Setup struct cortex_a8_common */
2462
2463 /* prepare JTAG information for the new target */
2464 cortex_a8->jtag_info.tap = tap;
2465 cortex_a8->jtag_info.scann_size = 4;
2466
2467 /* Leave (only) generic DAP stuff for debugport_init() */
2468 dap->jtag_info = &cortex_a8->jtag_info;
2469
2470 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
2471 dap->tar_autoincr_block = (1 << 10);
2472 dap->memaccess_tck = 80;
2473 tap->dap = dap;
2474 }
2475 else
2476 armv7a->armv4_5_common.dap = tap->dap;
2477
2478 cortex_a8->fast_reg_read = 0;
2479
2480 /* register arch-specific functions */
2481 armv7a->examine_debug_reason = NULL;
2482
2483 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
2484
2485 armv7a->pre_restore_context = NULL;
2486
2487 armv7a->armv7a_mmu.read_physical_memory = cortex_a8_read_phys_memory;
2488
2489
2490 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
2491
2492 /* REVISIT v7a setup should be in a v7a-specific routine */
2493 armv7a_init_arch_info(target, armv7a);
2494 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
2495
2496 return ERROR_OK;
2497 }
2498
2499 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
2500 {
2501 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
2502
2503 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
2504 }
2505
2506
2507
2508 static int cortex_a8_mmu(struct target *target, int *enabled)
2509 {
2510 if (target->state != TARGET_HALTED) {
2511 LOG_ERROR("%s: target not halted", __func__);
2512 return ERROR_TARGET_INVALID;
2513 }
2514
2515 *enabled = target_to_cortex_a8(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2516 return ERROR_OK;
2517 }
2518
2519 static int cortex_a8_virt2phys(struct target *target,
2520 uint32_t virt, uint32_t *phys)
2521 {
2522 int retval = ERROR_FAIL;
2523 struct armv7a_common *armv7a = target_to_armv7a(target);
2524 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
2525 uint8_t apsel = swjdp->apsel;
2526 if (apsel == swjdp_memoryap)
2527 {
2528 uint32_t ret;
2529 retval = armv7a_mmu_translate_va(target,
2530 virt, &ret);
2531 if (retval != ERROR_OK)
2532 goto done;
2533 *phys = ret;
2534 }
2535 else
2536 { /* use this method if swjdp_memoryap not selected */
2537 /* mmu must be enable in order to get a correct translation */
2538 retval = cortex_a8_mmu_modify(target, 1);
2539 if (retval != ERROR_OK) goto done;
2540 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
2541 }
2542 done:
2543 return retval;
2544 }
2545
2546 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
2547 {
2548 struct target *target = get_current_target(CMD_CTX);
2549 struct armv7a_common *armv7a = target_to_armv7a(target);
2550
2551 return armv7a_handle_cache_info_command(CMD_CTX,
2552 &armv7a->armv7a_mmu.armv7a_cache);
2553 }
2554
2555
2556 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
2557 {
2558 struct target *target = get_current_target(CMD_CTX);
2559 if (!target_was_examined(target))
2560 {
2561 LOG_ERROR("target not examined yet");
2562 return ERROR_FAIL;
2563 }
2564
2565 return cortex_a8_init_debug_access(target);
2566 }
2567 COMMAND_HANDLER(cortex_a8_handle_smp_off_command)
2568 {
2569 struct target *target = get_current_target(CMD_CTX);
2570 /* check target is an smp target */
2571 struct target_list *head;
2572 struct target *curr;
2573 head = target->head;
2574 target->smp = 0;
2575 if (head != (struct target_list*)NULL)
2576 {
2577 while (head != (struct target_list*)NULL)
2578 {
2579 curr = head->target;
2580 curr->smp = 0;
2581 head = head->next;
2582 }
2583 /* fixes the target display to the debugger */
2584 target->gdb_service->target = target;
2585 }
2586 return ERROR_OK;
2587 }
2588
2589 COMMAND_HANDLER(cortex_a8_handle_smp_on_command)
2590 {
2591 struct target *target = get_current_target(CMD_CTX);
2592 struct target_list *head;
2593 struct target *curr;
2594 head = target->head;
2595 if (head != (struct target_list*)NULL)
2596 { target->smp=1;
2597 while (head != (struct target_list*)NULL)
2598 {
2599 curr = head->target;
2600 curr->smp = 1;
2601 head = head->next;
2602 }
2603 }
2604 return ERROR_OK;
2605 }
2606
/* "smp_gdb" command: with one argument, pin the core gdb talks to by
 * storing it in gdb_service->core[1]; always prints the current and
 * requested core ids.  Only meaningful when the target has an SMP group.
 */
COMMAND_HANDLER(cortex_a8_handle_smp_gdb_command)
{
	struct target *target = get_current_target(CMD_CTX);
	int retval = ERROR_OK;
	struct target_list *head;
	head = target->head;
	if (head != (struct target_list*)NULL)
	{
		if (CMD_ARGC == 1)
		{
			int coreid = 0;
			COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
			/* NOTE(review): COMMAND_PARSE_NUMBER appears to return from
			 * the handler itself on a parse error, which would make the
			 * check below dead code ('retval' is never reassigned) —
			 * confirm against the macro in command.h. */
			if (ERROR_OK != retval)
				return retval;
			target->gdb_service->core[1]=coreid;

		}
		command_print(CMD_CTX, "gdb coreid %d -> %d", target->gdb_service->core[0]
			, target->gdb_service->core[1]);
	}
	return ERROR_OK;
}
2629
2630 static const struct command_registration cortex_a8_exec_command_handlers[] = {
2631 {
2632 .name = "cache_info",
2633 .handler = cortex_a8_handle_cache_info_command,
2634 .mode = COMMAND_EXEC,
2635 .help = "display information about target caches",
2636 },
2637 {
2638 .name = "dbginit",
2639 .handler = cortex_a8_handle_dbginit_command,
2640 .mode = COMMAND_EXEC,
2641 .help = "Initialize core debug",
2642 },
2643 { .name ="smp_off",
2644 .handler = cortex_a8_handle_smp_off_command,
2645 .mode = COMMAND_EXEC,
2646 .help = "Stop smp handling",
2647 },
2648 {
2649 .name ="smp_on",
2650 .handler = cortex_a8_handle_smp_on_command,
2651 .mode = COMMAND_EXEC,
2652 .help = "Restart smp handling",
2653 },
2654 {
2655 .name ="smp_gdb",
2656 .handler = cortex_a8_handle_smp_gdb_command,
2657 .mode = COMMAND_EXEC,
2658 .help = "display/fix current core played to gdb",
2659 },
2660
2661
2662 COMMAND_REGISTRATION_DONE
2663 };
/* Top-level command registration: chain in the generic ARM and ARMv7-A
 * command groups, plus the cortex_a8-specific group above.
 */
static const struct command_registration cortex_a8_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a8",
		.mode = COMMAND_ANY,
		.help = "Cortex-A8 command group",
		.chain = cortex_a8_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2679
/* Target-type vtable for the Cortex-A8: wires the generic target layer
 * to the cortex_a8_* implementations in this file (with the generic
 * ARM/ARMv7-A helpers for register lists, checksums and algorithms).
 */
struct target_type cortexa8_target = {
	.name = "cortex_a8",

	.poll = cortex_a8_poll,
	.arch_state = armv7a_arch_state,

	.target_request_data = NULL,

	/* run control */
	.halt = cortex_a8_halt,
	.resume = cortex_a8_resume,
	.step = cortex_a8_step,

	.assert_reset = cortex_a8_assert_reset,
	.deassert_reset = cortex_a8_deassert_reset,
	.soft_reset_halt = NULL,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* virtual-address memory access */
	.read_memory = cortex_a8_read_memory,
	.write_memory = cortex_a8_write_memory,
	.bulk_write_memory = cortex_a8_bulk_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* breakpoints; watchpoints are not implemented for this target */
	.add_breakpoint = cortex_a8_add_breakpoint,
	.add_context_breakpoint = cortex_a8_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a8_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a8_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a8_command_handlers,
	.target_create = cortex_a8_target_create,
	.init_target = cortex_a8_init_target,
	.examine = cortex_a8_examine,

	/* physical-address memory access and address translation */
	.read_phys_memory = cortex_a8_read_phys_memory,
	.write_phys_memory = cortex_a8_write_phys_memory,
	.mmu = cortex_a8_mmu,
	.virt2phys = cortex_a8_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to reach this page again; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)