1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * This program is free software; you can redistribute it and/or modify *
21 * it under the terms of the GNU General Public License as published by *
22 * the Free Software Foundation; either version 2 of the License, or *
23 * (at your option) any later version. *
24 * *
25 * This program is distributed in the hope that it will be useful, *
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
28 * GNU General Public License for more details. *
29 * *
30 * You should have received a copy of the GNU General Public License *
31 * along with this program; if not, write to the *
32 * Free Software Foundation, Inc., *
33 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
34 * *
35 * Cortex-A8(tm) TRM, ARM DDI 0344H *
36 * Cortex-A9(tm) TRM, ARM DDI 0407F *
37 * *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include "breakpoints.h"
45 #include "cortex_a.h"
46 #include "register.h"
47 #include "target_request.h"
48 #include "target_type.h"
49 #include "arm_opcodes.h"
50 #include <helper/time_support.h>
51
52 static int cortex_a8_poll(struct target *target);
53 static int cortex_a8_debug_entry(struct target *target);
54 static int cortex_a8_restore_context(struct target *target, bool bpwp);
55 static int cortex_a8_set_breakpoint(struct target *target,
56 struct breakpoint *breakpoint, uint8_t matchmode);
57 static int cortex_a8_set_context_breakpoint(struct target *target,
58 struct breakpoint *breakpoint, uint8_t matchmode);
59 static int cortex_a8_set_hybrid_breakpoint(struct target *target,
60 struct breakpoint *breakpoint);
61 static int cortex_a8_unset_breakpoint(struct target *target,
62 struct breakpoint *breakpoint);
63 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
64 uint32_t *value, int regnum);
65 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
66 uint32_t value, int regnum);
67 static int cortex_a8_mmu(struct target *target, int *enabled);
68 static int cortex_a8_virt2phys(struct target *target,
69 uint32_t virt, uint32_t *phys);
70
71 /*
72 * FIXME do topology discovery using the ROM; don't
73 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
74 * cores, with different AP numbering ... don't use a #define
75 * for these numbers, use per-core armv7a state.
76 */
77 #define swjdp_memoryap 0
78 #define swjdp_debugap 1
79
80 /* restore cp15_control_reg at resume */
81 static int cortex_a8_restore_cp15_control_reg(struct target *target)
82 {
83 int retval = ERROR_OK;
84 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
85 struct armv7a_common *armv7a = target_to_armv7a(target);
86
87 if (cortex_a8->cp15_control_reg != cortex_a8->cp15_control_reg_curr) {
88 cortex_a8->cp15_control_reg_curr = cortex_a8->cp15_control_reg;
89 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg); */
90 retval = armv7a->arm.mcr(target, 15,
91 0, 0, /* op1, op2 */
92 1, 0, /* CRn, CRm */
93 cortex_a8->cp15_control_reg);
94 }
95 return retval;
96 }
97
98 /* check the address before a cortex_a8 APB read/write access with the mmu on;
99 * avoids a predictable APB data abort */
100 static int cortex_a8_check_address(struct target *target, uint32_t address)
101 {
102 struct armv7a_common *armv7a = target_to_armv7a(target);
103 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
104 uint32_t os_border = armv7a->armv7a_mmu.os_border;
105 if ((address < os_border) &&
106 (armv7a->arm.core_mode == ARM_MODE_SVC)) {
107 LOG_ERROR("0x%" PRIx32 ": userspace access while the target is in supervisor mode", address);
108 return ERROR_FAIL;
109 }
110 if ((address >= os_border) &&
111 (cortex_a8->curr_mode != ARM_MODE_SVC)) {
112 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
113 cortex_a8->curr_mode = ARM_MODE_SVC;
114 LOG_INFO("0x%" PRIx32 ": kernel space access while the target was not in supervisor mode; switched to SVC",
115 address);
116 return ERROR_OK;
117 }
118 if ((address < os_border) &&
119 (cortex_a8->curr_mode == ARM_MODE_SVC)) {
120 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
121 cortex_a8->curr_mode = ARM_MODE_ANY;
122 }
123 return ERROR_OK;
124 }
125 /* modify cp15_control_reg in order to enable or disable the mmu for:
126 * - virt2phys address conversion
127 * - reading or writing memory at physical or virtual addresses */
128 static int cortex_a8_mmu_modify(struct target *target, int enable)
129 {
130 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
131 struct armv7a_common *armv7a = target_to_armv7a(target);
132 int retval = ERROR_OK;
133 if (enable) {
134 /* the mmu must have been enabled when the target stopped; otherwise it cannot be enabled here */
135 if (!(cortex_a8->cp15_control_reg & 0x1U)) {
136 LOG_ERROR("trying to enable the mmu on a target that stopped with the mmu disabled");
137 return ERROR_FAIL;
138 }
139 if (!(cortex_a8->cp15_control_reg_curr & 0x1U)) {
140 cortex_a8->cp15_control_reg_curr |= 0x1U;
141 retval = armv7a->arm.mcr(target, 15,
142 0, 0, /* op1, op2 */
143 1, 0, /* CRn, CRm */
144 cortex_a8->cp15_control_reg_curr);
145 }
146 } else {
147 if (cortex_a8->cp15_control_reg_curr & 0x4U) {
148 /* data cache is active */
149 cortex_a8->cp15_control_reg_curr &= ~0x4U;
150 /* flush the data cache using the armv7 flush hook, if provided */
151 if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache)
152 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache(target);
153 }
154 if ((cortex_a8->cp15_control_reg_curr & 0x1U)) {
155 cortex_a8->cp15_control_reg_curr &= ~0x1U;
156 retval = armv7a->arm.mcr(target, 15,
157 0, 0, /* op1, op2 */
158 1, 0, /* CRn, CRm */
159 cortex_a8->cp15_control_reg_curr);
160 }
161 }
162 return retval;
163 }
164
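/*
 * Illustrative sketch (not part of the upstream file, hence #if 0): how a
 * physical-address access path could bracket its work with
 * cortex_a8_mmu_modify(), turning the MMU off for the raw access and back on
 * afterwards.  The helper name example_phys_read() is hypothetical.
 */
#if 0
static int example_phys_read(struct target *target)
{
	int mmu_enabled = 0;
	int retval = cortex_a8_mmu(target, &mmu_enabled);
	if (retval != ERROR_OK)
		return retval;
	if (mmu_enabled) {
		/* turn the MMU off for the duration of the physical access */
		retval = cortex_a8_mmu_modify(target, 0);
		if (retval != ERROR_OK)
			return retval;
	}
	/* ... perform the raw (physical address) access here ... */
	if (mmu_enabled)
		/* restore the MMU state the core had when it stopped */
		retval = cortex_a8_mmu_modify(target, 1);
	return retval;
}
#endif
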
165 /*
166 * Cortex-A8 basic debug access; very low level, assumes core state is saved
167 */
168 static int cortex_a8_init_debug_access(struct target *target)
169 {
170 struct armv7a_common *armv7a = target_to_armv7a(target);
171 struct adiv5_dap *swjdp = armv7a->arm.dap;
172 int retval;
173 uint32_t dummy;
174
175 LOG_DEBUG(" ");
176
177 /* Unlock the debug registers for modification.
178 * The debug port might be uninitialised, so try twice. */
179 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
180 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
181 if (retval != ERROR_OK) {
182 /* try again */
183 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
184 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
185 if (retval == ERROR_OK)
186 LOG_USER(
187 "Locking debug access failed on first, but succeeded on second try.");
188 }
189 if (retval != ERROR_OK)
190 return retval;
191 /* Clear Sticky Power Down status Bit in PRSR to enable access to
192 the registers in the Core Power Domain */
193 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
194 armv7a->debug_base + CPUDBG_PRSR, &dummy);
195 if (retval != ERROR_OK)
196 return retval;
197
198 /* Enabling of instruction execution in debug mode is done in debug_entry code */
199
200 /* Resync breakpoint registers */
201
202 /* Since this is likely called from init or reset, update target state information*/
203 return cortex_a8_poll(target);
204 }
205
206 /* To reduce needless round-trips, pass in a pointer to the current
207 * DSCR value. Initialize it to zero if you just need to know the
208 * value on return from this function; or DSCR_INSTR_COMP if you
209 * happen to know that no instruction is pending.
210 */
211 static int cortex_a8_exec_opcode(struct target *target,
212 uint32_t opcode, uint32_t *dscr_p)
213 {
214 uint32_t dscr;
215 int retval;
216 struct armv7a_common *armv7a = target_to_armv7a(target);
217 struct adiv5_dap *swjdp = armv7a->arm.dap;
218
219 dscr = dscr_p ? *dscr_p : 0;
220
221 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
222
223 /* Wait for InstrCompl bit to be set */
224 long long then = timeval_ms();
225 while ((dscr & DSCR_INSTR_COMP) == 0) {
226 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
227 armv7a->debug_base + CPUDBG_DSCR, &dscr);
228 if (retval != ERROR_OK) {
229 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
230 return retval;
231 }
232 if (timeval_ms() > then + 1000) {
233 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
234 return ERROR_FAIL;
235 }
236 }
237
238 retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
239 armv7a->debug_base + CPUDBG_ITR, opcode);
240 if (retval != ERROR_OK)
241 return retval;
242
243 then = timeval_ms();
244 do {
245 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
246 armv7a->debug_base + CPUDBG_DSCR, &dscr);
247 if (retval != ERROR_OK) {
248 LOG_ERROR("Could not read DSCR register");
249 return retval;
250 }
251 if (timeval_ms() > then + 1000) {
252 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
253 return ERROR_FAIL;
254 }
255 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
256
257 if (dscr_p)
258 *dscr_p = dscr;
259
260 return retval;
261 }
262
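/*
 * Illustrative sketch (not part of the upstream file, hence #if 0): chaining
 * two opcodes through cortex_a8_exec_opcode() while reusing the cached DSCR
 * value, as the comment above describes.  The DSB and ISB encodings match the
 * ARMV4_5_MCR() uses elsewhere in this file; the helper name is hypothetical.
 */
#if 0
static int example_dsb_isb(struct target *target)
{
	/* start from DSCR_INSTR_COMP: we know no instruction is pending */
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* DSB: "MCR p15, 0, R0, c7, c10, 4" */
	retval = cortex_a8_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 10, 4), &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* ISB / prefetch flush: "MCR p15, 0, R0, c7, c5, 4";
	 * reuses the DSCR value cached by the previous call */
	return cortex_a8_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 4), &dscr);
}
#endif
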
263 /**************************************************************************
264 Read the core registers with very few exec_opcode calls; fast, but needs a
265 work area. This can cause problems with the MMU active.
266 **************************************************************************/
267 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
268 uint32_t *regfile)
269 {
270 int retval = ERROR_OK;
271 struct armv7a_common *armv7a = target_to_armv7a(target);
272 struct adiv5_dap *swjdp = armv7a->arm.dap;
273
274 retval = cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
275 if (retval != ERROR_OK)
276 return retval;
277 retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
278 if (retval != ERROR_OK)
279 return retval;
280 retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
281 if (retval != ERROR_OK)
282 return retval;
283
284 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
285 (uint8_t *)(&regfile[1]), 4*15, address);
286
287 return retval;
288 }
289
290 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
291 uint32_t *value, int regnum)
292 {
293 int retval = ERROR_OK;
294 uint8_t reg = regnum&0xFF;
295 uint32_t dscr = 0;
296 struct armv7a_common *armv7a = target_to_armv7a(target);
297 struct adiv5_dap *swjdp = armv7a->arm.dap;
298
299 if (reg > 17)
300 return retval;
301
302 if (reg < 15) {
303 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
304 retval = cortex_a8_exec_opcode(target,
305 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
306 &dscr);
307 if (retval != ERROR_OK)
308 return retval;
309 } else if (reg == 15) {
310 /* "MOV r0, r15"; then move r0 to DCCTX */
311 retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
312 if (retval != ERROR_OK)
313 return retval;
314 retval = cortex_a8_exec_opcode(target,
315 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
316 &dscr);
317 if (retval != ERROR_OK)
318 return retval;
319 } else {
320 /* "MRS r0, CPSR" or "MRS r0, SPSR"
321 * then move r0 to DCCTX
322 */
323 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
324 if (retval != ERROR_OK)
325 return retval;
326 retval = cortex_a8_exec_opcode(target,
327 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
328 &dscr);
329 if (retval != ERROR_OK)
330 return retval;
331 }
332
333 /* Wait for DTRTXfull, then read DTRTX */
334 long long then = timeval_ms();
335 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
336 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
337 armv7a->debug_base + CPUDBG_DSCR, &dscr);
338 if (retval != ERROR_OK)
339 return retval;
340 if (timeval_ms() > then + 1000) {
341 LOG_ERROR("Timeout waiting for DTRTXfull in cortex_a8_dap_read_coreregister_u32");
342 return ERROR_FAIL;
343 }
344 }
345
346 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
347 armv7a->debug_base + CPUDBG_DTRTX, value);
348 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
349
350 return retval;
351 }
352
353 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
354 uint32_t value, int regnum)
355 {
356 int retval = ERROR_OK;
357 uint8_t Rd = regnum&0xFF;
358 uint32_t dscr;
359 struct armv7a_common *armv7a = target_to_armv7a(target);
360 struct adiv5_dap *swjdp = armv7a->arm.dap;
361
362 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
363
364 /* Check that DCCRX is not full */
365 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
366 armv7a->debug_base + CPUDBG_DSCR, &dscr);
367 if (retval != ERROR_OK)
368 return retval;
369 if (dscr & DSCR_DTR_RX_FULL) {
370 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
371 /* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
372 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
373 &dscr);
374 if (retval != ERROR_OK)
375 return retval;
376 }
377
378 if (Rd > 17)
379 return retval;
380
381 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
382 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
383 retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
384 armv7a->debug_base + CPUDBG_DTRRX, value);
385 if (retval != ERROR_OK)
386 return retval;
387
388 if (Rd < 15) {
389 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
390 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
391 &dscr);
392
393 if (retval != ERROR_OK)
394 return retval;
395 } else if (Rd == 15) {
396 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
397 * then "mov r15, r0"
398 */
399 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
400 &dscr);
401 if (retval != ERROR_OK)
402 return retval;
403 retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
404 if (retval != ERROR_OK)
405 return retval;
406 } else {
407 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
408 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
409 */
410 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
411 &dscr);
412 if (retval != ERROR_OK)
413 return retval;
414 retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
415 &dscr);
416 if (retval != ERROR_OK)
417 return retval;
418
419 /* "Prefetch flush" after modifying execution status in CPSR */
420 if (Rd == 16) {
421 retval = cortex_a8_exec_opcode(target,
422 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
423 &dscr);
424 if (retval != ERROR_OK)
425 return retval;
426 }
427 }
428
429 return retval;
430 }
431
432 /* Write to memory mapped registers directly with no cache or mmu handling */
433 static int cortex_a8_dap_write_memap_register_u32(struct target *target,
434 uint32_t address,
435 uint32_t value)
436 {
437 int retval;
438 struct armv7a_common *armv7a = target_to_armv7a(target);
439 struct adiv5_dap *swjdp = armv7a->arm.dap;
440
441 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap, address, value);
442
443 return retval;
444 }
445
446 /*
447 * Cortex-A8 implementation of Debug Programmer's Model
448 *
449 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
450 * so there's no need to poll for it before executing an instruction.
451 *
452 * NOTE that in several of these cases the "stall" mode might be useful.
453 * It'd let us queue a few operations together... prepare/finish might
454 * be the places to enable/disable that mode.
455 */
456
457 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
458 {
459 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
460 }
461
462 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
463 {
464 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
465 return mem_ap_sel_write_u32(a8->armv7a_common.arm.dap,
466 swjdp_debugap, a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
467 }
468
469 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
470 uint32_t *dscr_p)
471 {
472 struct adiv5_dap *swjdp = a8->armv7a_common.arm.dap;
473 uint32_t dscr = DSCR_INSTR_COMP;
474 int retval;
475
476 if (dscr_p)
477 dscr = *dscr_p;
478
479 /* Wait for DTRTXfull */
480 long long then = timeval_ms();
481 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
482 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
483 a8->armv7a_common.debug_base + CPUDBG_DSCR,
484 &dscr);
485 if (retval != ERROR_OK)
486 return retval;
487 if (timeval_ms() > then + 1000) {
488 LOG_ERROR("Timeout waiting for DCC read");
489 return ERROR_FAIL;
490 }
491 }
492
493 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
494 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
495 if (retval != ERROR_OK)
496 return retval;
497 /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
498
499 if (dscr_p)
500 *dscr_p = dscr;
501
502 return retval;
503 }
504
505 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
506 {
507 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
508 struct adiv5_dap *swjdp = a8->armv7a_common.arm.dap;
509 uint32_t dscr;
510 int retval;
511
512 /* set up invariant: INSTR_COMP is set after every DPM operation */
513 long long then = timeval_ms();
514 for (;; ) {
515 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
516 a8->armv7a_common.debug_base + CPUDBG_DSCR,
517 &dscr);
518 if (retval != ERROR_OK)
519 return retval;
520 if ((dscr & DSCR_INSTR_COMP) != 0)
521 break;
522 if (timeval_ms() > then + 1000) {
523 LOG_ERROR("Timeout waiting for dpm prepare");
524 return ERROR_FAIL;
525 }
526 }
527
528 /* this "should never happen" ... */
529 if (dscr & DSCR_DTR_RX_FULL) {
530 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
531 /* Clear DCCRX */
532 retval = cortex_a8_exec_opcode(
533 a8->armv7a_common.arm.target,
534 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
535 &dscr);
536 if (retval != ERROR_OK)
537 return retval;
538 }
539
540 return retval;
541 }
542
543 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
544 {
545 /* REVISIT what could be done here? */
546 return ERROR_OK;
547 }
548
549 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
550 uint32_t opcode, uint32_t data)
551 {
552 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
553 int retval;
554 uint32_t dscr = DSCR_INSTR_COMP;
555
556 retval = cortex_a8_write_dcc(a8, data);
557 if (retval != ERROR_OK)
558 return retval;
559
560 return cortex_a8_exec_opcode(
561 a8->armv7a_common.arm.target,
562 opcode,
563 &dscr);
564 }
565
566 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
567 uint32_t opcode, uint32_t data)
568 {
569 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
570 uint32_t dscr = DSCR_INSTR_COMP;
571 int retval;
572
573 retval = cortex_a8_write_dcc(a8, data);
574 if (retval != ERROR_OK)
575 return retval;
576
577 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
578 retval = cortex_a8_exec_opcode(
579 a8->armv7a_common.arm.target,
580 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
581 &dscr);
582 if (retval != ERROR_OK)
583 return retval;
584
585 /* then the opcode, taking data from R0 */
586 retval = cortex_a8_exec_opcode(
587 a8->armv7a_common.arm.target,
588 opcode,
589 &dscr);
590
591 return retval;
592 }
593
594 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
595 {
596 struct target *target = dpm->arm->target;
597 uint32_t dscr = DSCR_INSTR_COMP;
598
599 /* "Prefetch flush" after modifying execution status in CPSR */
600 return cortex_a8_exec_opcode(target,
601 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
602 &dscr);
603 }
604
605 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
606 uint32_t opcode, uint32_t *data)
607 {
608 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
609 int retval;
610 uint32_t dscr = DSCR_INSTR_COMP;
611
612 /* the opcode, writing data to DCC */
613 retval = cortex_a8_exec_opcode(
614 a8->armv7a_common.arm.target,
615 opcode,
616 &dscr);
617 if (retval != ERROR_OK)
618 return retval;
619
620 return cortex_a8_read_dcc(a8, data, &dscr);
621 }
622
623
624 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
625 uint32_t opcode, uint32_t *data)
626 {
627 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
628 uint32_t dscr = DSCR_INSTR_COMP;
629 int retval;
630
631 /* the opcode, writing data to R0 */
632 retval = cortex_a8_exec_opcode(
633 a8->armv7a_common.arm.target,
634 opcode,
635 &dscr);
636 if (retval != ERROR_OK)
637 return retval;
638
639 /* write R0 to DCC */
640 retval = cortex_a8_exec_opcode(
641 a8->armv7a_common.arm.target,
642 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
643 &dscr);
644 if (retval != ERROR_OK)
645 return retval;
646
647 return cortex_a8_read_dcc(a8, data, &dscr);
648 }
649
650 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
651 uint32_t addr, uint32_t control)
652 {
653 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
654 uint32_t vr = a8->armv7a_common.debug_base;
655 uint32_t cr = a8->armv7a_common.debug_base;
656 int retval;
657
658 switch (index_t) {
659 case 0 ... 15: /* breakpoints */
660 vr += CPUDBG_BVR_BASE;
661 cr += CPUDBG_BCR_BASE;
662 break;
663 case 16 ... 31: /* watchpoints */
664 vr += CPUDBG_WVR_BASE;
665 cr += CPUDBG_WCR_BASE;
666 index_t -= 16;
667 break;
668 default:
669 return ERROR_FAIL;
670 }
671 vr += 4 * index_t;
672 cr += 4 * index_t;
673
674 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
675 (unsigned) vr, (unsigned) cr);
676
677 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
678 vr, addr);
679 if (retval != ERROR_OK)
680 return retval;
681 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
682 cr, control);
683 return retval;
684 }
685
686 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
687 {
688 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
689 uint32_t cr;
690
691 switch (index_t) {
692 case 0 ... 15:
693 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
694 break;
695 case 16 ... 31:
696 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
697 index_t -= 16;
698 break;
699 default:
700 return ERROR_FAIL;
701 }
702 cr += 4 * index_t;
703
704 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
705
706 /* clear control register */
707 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
708 }
709
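/*
 * Illustrative sketch (not part of the upstream file, hence #if 0): the index
 * convention used by the two hooks above -- indices 0..15 select breakpoint
 * register pairs, 16..31 select watchpoint register pairs.  The address and
 * control values below are placeholders, not a meaningful configuration; real
 * callers build them from the ARMv7 debug BCR/WCR field layouts.
 */
#if 0
static int example_bpwp_usage(struct arm_dpm *dpm)
{
	/* program breakpoint register pair #0 */
	int retval = cortex_a8_bpwp_enable(dpm, 0, 0x00008000, 0x1);
	if (retval != ERROR_OK)
		return retval;
	/* program watchpoint register pair #2 (index 16 + 2) */
	retval = cortex_a8_bpwp_enable(dpm, 18, 0x20000000, 0x1);
	if (retval != ERROR_OK)
		return retval;
	/* ... and clear that watchpoint again */
	return cortex_a8_bpwp_disable(dpm, 18);
}
#endif
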
710 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
711 {
712 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
713 int retval;
714
715 dpm->arm = &a8->armv7a_common.arm;
716 dpm->didr = didr;
717
718 dpm->prepare = cortex_a8_dpm_prepare;
719 dpm->finish = cortex_a8_dpm_finish;
720
721 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
722 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
723 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
724
725 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
726 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
727
728 dpm->bpwp_enable = cortex_a8_bpwp_enable;
729 dpm->bpwp_disable = cortex_a8_bpwp_disable;
730
731 retval = arm_dpm_setup(dpm);
732 if (retval == ERROR_OK)
733 retval = arm_dpm_initialize(dpm);
734
735 return retval;
736 }
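
/*
 * Illustrative sketch (not part of the upstream file, hence #if 0): once
 * cortex_a8_dpm_setup() has populated the hooks above, generic arm_dpm-style
 * code drives them as prepare / operate / finish.  The CP15 write below
 * (ICIALLU, invalidate all instruction caches; the R0 value is ignored) and
 * the helper name are only examples.
 */
#if 0
static int example_dpm_icache_inval(struct arm_dpm *dpm)
{
	int retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;
	/* "MCR p15, 0, R0, c7, c5, 0" -- ICIALLU */
	retval = dpm->instr_write_data_r0(dpm,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 0), 0);
	dpm->finish(dpm);
	return retval;
}
#endif
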
737 static struct target *get_cortex_a8(struct target *target, int32_t coreid)
738 {
739 struct target_list *head;
740 struct target *curr;
741
742 head = target->head;
743 while (head != (struct target_list *)NULL) {
744 curr = head->target;
745 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
746 return curr;
747 head = head->next;
748 }
749 return target;
750 }
751 static int cortex_a8_halt(struct target *target);
752
753 static int cortex_a8_halt_smp(struct target *target)
754 {
755 int retval = 0;
756 struct target_list *head;
757 struct target *curr;
758 head = target->head;
759 while (head != (struct target_list *)NULL) {
760 curr = head->target;
761 if ((curr != target) && (curr->state != TARGET_HALTED))
762 retval += cortex_a8_halt(curr);
763 head = head->next;
764 }
765 return retval;
766 }
767
768 static int update_halt_gdb(struct target *target)
769 {
770 int retval = 0;
771 if (target->gdb_service->core[0] == -1) {
772 target->gdb_service->target = target;
773 target->gdb_service->core[0] = target->coreid;
774 retval += cortex_a8_halt_smp(target);
775 }
776 return retval;
777 }
778
779 /*
780 * Cortex-A8 Run control
781 */
782
783 static int cortex_a8_poll(struct target *target)
784 {
785 int retval = ERROR_OK;
786 uint32_t dscr;
787 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
788 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
789 struct adiv5_dap *swjdp = armv7a->arm.dap;
790 enum target_state prev_target_state = target->state;
791 /* toggling to another core is done by gdb as follows: */
792 /* maint packet J core_id */
793 /* continue */
794 /* the next poll triggers a halt event sent to gdb */
795 if ((target->state == TARGET_HALTED) && (target->smp) &&
796 (target->gdb_service) &&
797 (target->gdb_service->target == NULL)) {
798 target->gdb_service->target =
799 get_cortex_a8(target, target->gdb_service->core[1]);
800 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
801 return retval;
802 }
803 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
804 armv7a->debug_base + CPUDBG_DSCR, &dscr);
805 if (retval != ERROR_OK)
806 return retval;
807 cortex_a8->cpudbg_dscr = dscr;
808
809 if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
810 if (prev_target_state != TARGET_HALTED) {
811 /* We have a halting debug event */
812 LOG_DEBUG("Target halted");
813 target->state = TARGET_HALTED;
814 if ((prev_target_state == TARGET_RUNNING)
815 || (prev_target_state == TARGET_UNKNOWN)
816 || (prev_target_state == TARGET_RESET)) {
817 retval = cortex_a8_debug_entry(target);
818 if (retval != ERROR_OK)
819 return retval;
820 if (target->smp) {
821 retval = update_halt_gdb(target);
822 if (retval != ERROR_OK)
823 return retval;
824 }
825 target_call_event_callbacks(target,
826 TARGET_EVENT_HALTED);
827 }
828 if (prev_target_state == TARGET_DEBUG_RUNNING) {
829 LOG_DEBUG(" ");
830
831 retval = cortex_a8_debug_entry(target);
832 if (retval != ERROR_OK)
833 return retval;
834 if (target->smp) {
835 retval = update_halt_gdb(target);
836 if (retval != ERROR_OK)
837 return retval;
838 }
839
840 target_call_event_callbacks(target,
841 TARGET_EVENT_DEBUG_HALTED);
842 }
843 }
844 } else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
845 target->state = TARGET_RUNNING;
846 else {
847 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
848 target->state = TARGET_UNKNOWN;
849 }
850
851 return retval;
852 }
853
854 static int cortex_a8_halt(struct target *target)
855 {
856 int retval = ERROR_OK;
857 uint32_t dscr;
858 struct armv7a_common *armv7a = target_to_armv7a(target);
859 struct adiv5_dap *swjdp = armv7a->arm.dap;
860
861 /*
862 * Tell the core to be halted by writing DRCR with 0x1
863 * and then wait for the core to be halted.
864 */
865 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
866 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
867 if (retval != ERROR_OK)
868 return retval;
869
870 /*
871 * enter halting debug mode
872 */
873 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
874 armv7a->debug_base + CPUDBG_DSCR, &dscr);
875 if (retval != ERROR_OK)
876 return retval;
877
878 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
879 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
880 if (retval != ERROR_OK)
881 return retval;
882
883 long long then = timeval_ms();
884 for (;; ) {
885 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
886 armv7a->debug_base + CPUDBG_DSCR, &dscr);
887 if (retval != ERROR_OK)
888 return retval;
889 if ((dscr & DSCR_CORE_HALTED) != 0)
890 break;
891 if (timeval_ms() > then + 1000) {
892 LOG_ERROR("Timeout waiting for halt");
893 return ERROR_FAIL;
894 }
895 }
896
897 target->debug_reason = DBG_REASON_DBGRQ;
898
899 return ERROR_OK;
900 }
901
902 static int cortex_a8_internal_restore(struct target *target, int current,
903 uint32_t *address, int handle_breakpoints, int debug_execution)
904 {
905 struct armv7a_common *armv7a = target_to_armv7a(target);
906 struct arm *arm = &armv7a->arm;
907 int retval;
908 uint32_t resume_pc;
909
910 if (!debug_execution)
911 target_free_all_working_areas(target);
912
913 #if 0
914 if (debug_execution) {
915 /* Disable interrupts */
916 /* We disable interrupts in the PRIMASK register instead of
917 * masking with C_MASKINTS,
918 * This is probably the same issue as Cortex-M3 Errata 377493:
919 * C_MASKINTS in parallel with disabled interrupts can cause
920 * local faults to not be taken. */
921 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
922 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
923 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
924
925 /* Make sure we are in Thumb mode */
926 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
927 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
928 32) | (1 << 24));
929 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
930 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
931 }
932 #endif
933
934 /* current = 1: continue on current pc, otherwise continue at <address> */
935 resume_pc = buf_get_u32(arm->pc->value, 0, 32);
936 if (!current)
937 resume_pc = *address;
938 else
939 *address = resume_pc;
940
941 /* Make sure that the ARMv7 gdb thumb fixups do not
942 * kill the return address
943 */
944 switch (arm->core_state) {
945 case ARM_STATE_ARM:
946 resume_pc &= 0xFFFFFFFC;
947 break;
948 case ARM_STATE_THUMB:
949 case ARM_STATE_THUMB_EE:
950 /* When the return address is loaded into PC
951 * bit 0 must be 1 to stay in Thumb state
952 */
953 resume_pc |= 0x1;
954 break;
955 case ARM_STATE_JAZELLE:
956 LOG_ERROR("How do I resume into Jazelle state??");
957 return ERROR_FAIL;
958 }
959 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
960 buf_set_u32(arm->pc->value, 0, 32, resume_pc);
961 arm->pc->dirty = 1;
962 arm->pc->valid = 1;
963 /* restore dpm_mode at system halt */
964 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
965 /* call this now, before restoring context, because it uses cpu
966 * register r0 for restoring the cp15 control register */
967 retval = cortex_a8_restore_cp15_control_reg(target);
968 if (retval != ERROR_OK)
969 return retval;
970 retval = cortex_a8_restore_context(target, handle_breakpoints);
971 if (retval != ERROR_OK)
972 return retval;
973 target->debug_reason = DBG_REASON_NOTHALTED;
974 target->state = TARGET_RUNNING;
975
976 /* registers are now invalid */
977 register_cache_invalidate(arm->core_cache);
978
979 #if 0
980 /* the front-end may request us not to handle breakpoints */
981 if (handle_breakpoints) {
982 /* Single step past breakpoint at current address */
983 breakpoint = breakpoint_find(target, resume_pc);
984 if (breakpoint) {
985 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
986 cortex_m3_unset_breakpoint(target, breakpoint);
987 cortex_m3_single_step_core(target);
988 cortex_m3_set_breakpoint(target, breakpoint);
989 }
990 }
991
992 #endif
993 return retval;
994 }
995
996 static int cortex_a8_internal_restart(struct target *target)
997 {
998 struct armv7a_common *armv7a = target_to_armv7a(target);
999 struct arm *arm = &armv7a->arm;
1000 struct adiv5_dap *swjdp = arm->dap;
1001 int retval;
1002 uint32_t dscr;
1003 /*
1004 * Restart core and wait for it to be started. Clear ITRen and sticky
1005 * exception flags: see ARMv7 ARM, C5.9.
1006 *
1007 * REVISIT: for single stepping, we probably want to
1008 * disable IRQs by default, with optional override...
1009 */
1010
1011 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1012 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1013 if (retval != ERROR_OK)
1014 return retval;
1015
1016 if ((dscr & DSCR_INSTR_COMP) == 0)
1017 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
1018
1019 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
1020 armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
1021 if (retval != ERROR_OK)
1022 return retval;
1023
1024 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
1025 armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
1026 DRCR_CLEAR_EXCEPTIONS);
1027 if (retval != ERROR_OK)
1028 return retval;
1029
1030 long long then = timeval_ms();
1031 for (;; ) {
1032 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1033 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1034 if (retval != ERROR_OK)
1035 return retval;
1036 if ((dscr & DSCR_CORE_RESTARTED) != 0)
1037 break;
1038 if (timeval_ms() > then + 1000) {
1039 LOG_ERROR("Timeout waiting for resume");
1040 return ERROR_FAIL;
1041 }
1042 }
1043
1044 target->debug_reason = DBG_REASON_NOTHALTED;
1045 target->state = TARGET_RUNNING;
1046
1047 /* registers are now invalid */
1048 register_cache_invalidate(arm->core_cache);
1049
1050 return ERROR_OK;
1051 }
1052
1053 static int cortex_a8_restore_smp(struct target *target, int handle_breakpoints)
1054 {
1055 int retval = 0;
1056 struct target_list *head;
1057 struct target *curr;
1058 uint32_t address;
1059 head = target->head;
1060 while (head != (struct target_list *)NULL) {
1061 curr = head->target;
1062 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1063 /* resume at the current address, not in step mode */
1064 retval += cortex_a8_internal_restore(curr, 1, &address,
1065 handle_breakpoints, 0);
1066 retval += cortex_a8_internal_restart(curr);
1067 }
1068 head = head->next;
1069
1070 }
1071 return retval;
1072 }
1073
1074 static int cortex_a8_resume(struct target *target, int current,
1075 uint32_t address, int handle_breakpoints, int debug_execution)
1076 {
1077 int retval = 0;
1078 /* dummy resume for smp toggle in order to reduce gdb impact */
1079 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1080 /* simulate a start and halt of target */
1081 target->gdb_service->target = NULL;
1082 target->gdb_service->core[0] = target->gdb_service->core[1];
1083 /* fake resume: at the next poll we play the target core[1], see poll */
1084 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1085 return 0;
1086 }
1087 cortex_a8_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1088 if (target->smp) {
1089 target->gdb_service->core[0] = -1;
1090 retval = cortex_a8_restore_smp(target, handle_breakpoints);
1091 if (retval != ERROR_OK)
1092 return retval;
1093 }
1094 cortex_a8_internal_restart(target);
1095
1096 if (!debug_execution) {
1097 target->state = TARGET_RUNNING;
1098 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1099 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1100 } else {
1101 target->state = TARGET_DEBUG_RUNNING;
1102 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1103 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1104 }
1105
1106 return ERROR_OK;
1107 }
1108
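/*
 * Illustrative sketch (not part of the upstream file, hence #if 0): the two
 * ways a caller can use the resume interface implemented above -- continue
 * from the current pc, or continue from an explicit address.  The address
 * value and helper name are placeholders.
 */
#if 0
static int example_resume_usage(struct target *target)
{
	/* current = 1: the address argument is ignored, resume at current pc */
	int retval = cortex_a8_resume(target, 1, 0, 0, 0);
	if (retval != ERROR_OK)
		return retval;
	/* ... later, once the target has halted again: resume from an
	 * explicit address instead (current = 0) */
	return cortex_a8_resume(target, 0, 0x80000000, 0, 0);
}
#endif
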
1109 static int cortex_a8_debug_entry(struct target *target)
1110 {
1111 int i;
1112 uint32_t regfile[16], cpsr, dscr;
1113 int retval = ERROR_OK;
1114 struct working_area *regfile_working_area = NULL;
1115 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1116 struct armv7a_common *armv7a = target_to_armv7a(target);
1117 struct arm *arm = &armv7a->arm;
1118 struct adiv5_dap *swjdp = armv7a->arm.dap;
1119 struct reg *reg;
1120
1121 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
1122
1123 /* REVISIT surely we should not re-read DSCR !! */
1124 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1125 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1126 if (retval != ERROR_OK)
1127 return retval;
1128
1129 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1130 * imprecise data aborts get discarded by issuing a Data
1131 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1132 */
1133
1134 /* Enable the ITR execution once we are in debug mode */
1135 dscr |= DSCR_ITR_EN;
1136 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
1137 armv7a->debug_base + CPUDBG_DSCR, dscr);
1138 if (retval != ERROR_OK)
1139 return retval;
1140
1141 /* Examine debug reason */
1142 arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);
1143
1144 /* save address of instruction that triggered the watchpoint? */
1145 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1146 uint32_t wfar;
1147
1148 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1149 armv7a->debug_base + CPUDBG_WFAR,
1150 &wfar);
1151 if (retval != ERROR_OK)
1152 return retval;
1153 arm_dpm_report_wfar(&armv7a->dpm, wfar);
1154 }
1155
1156 /* REVISIT fast_reg_read is never set ... */
1157
1158 /* Examine target state and mode */
1159 if (cortex_a8->fast_reg_read)
1160 target_alloc_working_area(target, 64, &regfile_working_area);
1161
1162 /* First load registers accessible through the core debug port */
1163 if (!regfile_working_area)
1164 retval = arm_dpm_read_current_registers(&armv7a->dpm);
1165 else {
1166 retval = cortex_a8_read_regs_through_mem(target,
1167 regfile_working_area->address, regfile);
1168
1169 target_free_working_area(target, regfile_working_area);
1170 if (retval != ERROR_OK)
1171 return retval;
1172
1173 /* read Current PSR */
1174 retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
1175 /* store current cpsr */
1176 if (retval != ERROR_OK)
1177 return retval;
1178
1179 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
1180
1181 arm_set_cpsr(arm, cpsr);
1182
1183 /* update cache */
1184 for (i = 0; i <= ARM_PC; i++) {
1185 reg = arm_reg_current(arm, i);
1186
1187 buf_set_u32(reg->value, 0, 32, regfile[i]);
1188 reg->valid = 1;
1189 reg->dirty = 0;
1190 }
1191
1192 /* Fixup PC Resume Address */
1193 if (cpsr & (1 << 5)) {
1194 /* T bit set for Thumb or ThumbEE state */
1195 regfile[ARM_PC] -= 4;
1196 } else {
1197 /* ARM state */
1198 regfile[ARM_PC] -= 8;
1199 }
1200
1201 reg = arm->pc;
1202 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
1203 reg->dirty = reg->valid;
1204 }
1205
1206 #if 0
1207 /* TODO, Move this */
1208 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1209 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1210 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1211
1212 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1213 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1214
1215 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1216 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1217 #endif
1218
1219 /* Are we in an exception handler */
1220 /* armv4_5->exception_number = 0; */
1221 if (armv7a->post_debug_entry) {
1222 retval = armv7a->post_debug_entry(target);
1223 if (retval != ERROR_OK)
1224 return retval;
1225 }
1226
1227 return retval;
1228 }
1229
1230 static int cortex_a8_post_debug_entry(struct target *target)
1231 {
1232 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1233 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1234 int retval;
1235
1236 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1237 retval = armv7a->arm.mrc(target, 15,
1238 0, 0, /* op1, op2 */
1239 1, 0, /* CRn, CRm */
1240 &cortex_a8->cp15_control_reg);
1241 if (retval != ERROR_OK)
1242 return retval;
1243 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
1244 cortex_a8->cp15_control_reg_curr = cortex_a8->cp15_control_reg;
1245
1246 if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1)
1247 armv7a_identify_cache(target);
1248
1249 armv7a->armv7a_mmu.mmu_enabled =
1250 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
1251 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1252 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
1253 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1254 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
1255 cortex_a8->curr_mode = armv7a->arm.core_mode;
1256
1257 return ERROR_OK;
1258 }
1259
1260 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1261 int handle_breakpoints)
1262 {
1263 struct armv7a_common *armv7a = target_to_armv7a(target);
1264 struct arm *arm = &armv7a->arm;
1265 struct breakpoint *breakpoint = NULL;
1266 struct breakpoint stepbreakpoint;
1267 struct reg *r;
1268 int retval;
1269
1270 if (target->state != TARGET_HALTED) {
1271 LOG_WARNING("target not halted");
1272 return ERROR_TARGET_NOT_HALTED;
1273 }
1274
1275 /* current = 1: continue on current pc, otherwise continue at <address> */
1276 r = arm->pc;
1277 if (!current)
1278 buf_set_u32(r->value, 0, 32, address);
1279 else
1280 address = buf_get_u32(r->value, 0, 32);
1281
1282 /* The front-end may request us not to handle breakpoints.
1283 * But since Cortex-A8 uses breakpoint for single step,
1284 * we MUST handle breakpoints.
1285 */
1286 handle_breakpoints = 1;
1287 if (handle_breakpoints) {
1288 breakpoint = breakpoint_find(target, address);
1289 if (breakpoint)
1290 cortex_a8_unset_breakpoint(target, breakpoint);
1291 }
1292
1293 /* Setup single step breakpoint */
1294 stepbreakpoint.address = address;
1295 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1296 ? 2 : 4;
1297 stepbreakpoint.type = BKPT_HARD;
1298 stepbreakpoint.set = 0;
1299
1300 /* Break on IVA mismatch */
1301 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1302
1303 target->debug_reason = DBG_REASON_SINGLESTEP;
1304
1305 retval = cortex_a8_resume(target, 1, address, 0, 0);
1306 if (retval != ERROR_OK)
1307 return retval;
1308
1309 long long then = timeval_ms();
1310 while (target->state != TARGET_HALTED) {
1311 retval = cortex_a8_poll(target);
1312 if (retval != ERROR_OK)
1313 return retval;
1314 if (timeval_ms() > then + 1000) {
1315 LOG_ERROR("timeout waiting for target halt");
1316 return ERROR_FAIL;
1317 }
1318 }
1319
1320 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1321
1322 target->debug_reason = DBG_REASON_BREAKPOINT;
1323
1324 if (breakpoint)
1325 cortex_a8_set_breakpoint(target, breakpoint, 0);
1326
1327 if (target->state == TARGET_HALTED)
1328 LOG_DEBUG("target stepped");
1329
1330 return ERROR_OK;
1331 }
1332
1333 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1334 {
1335 struct armv7a_common *armv7a = target_to_armv7a(target);
1336
1337 LOG_DEBUG(" ");
1338
1339 if (armv7a->pre_restore_context)
1340 armv7a->pre_restore_context(target);
1341
1342 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1343 }
1344
1345 /*
1346 * Cortex-A8 Breakpoint and watchpoint functions
1347 */
1348
1349 /* Setup hardware Breakpoint Register Pair */
1350 static int cortex_a8_set_breakpoint(struct target *target,
1351 struct breakpoint *breakpoint, uint8_t matchmode)
1352 {
1353 int retval;
1354 int brp_i = 0;
1355 uint32_t control;
1356 uint8_t byte_addr_select = 0x0F;
1357 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1358 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1359 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1360
1361 if (breakpoint->set) {
1362 LOG_WARNING("breakpoint already set");
1363 return ERROR_OK;
1364 }
1365
1366 if (breakpoint->type == BKPT_HARD) {
1367 while ((brp_i < cortex_a8->brp_num) && brp_list[brp_i].used)
1368 brp_i++;
1369 if (brp_i >= cortex_a8->brp_num) {
1370 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1371 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1372 }
1373 breakpoint->set = brp_i + 1;
1374 if (breakpoint->length == 2)
1375 byte_addr_select = (3 << (breakpoint->address & 0x02));
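/* DBGBCR layout (see the ARMv7 ARM debug register descriptions):
 * breakpoint type in bits [23:20], byte address select in [8:5],
 * privileged mode control 0b11 in [2:1], enable in bit [0] */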
1376 control = ((matchmode & 0x7) << 20)
1377 | (byte_addr_select << 5)
1378 | (3 << 1) | 1;
1379 brp_list[brp_i].used = 1;
1380 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1381 brp_list[brp_i].control = control;
1382 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1383 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1384 brp_list[brp_i].value);
1385 if (retval != ERROR_OK)
1386 return retval;
1387 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1388 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1389 brp_list[brp_i].control);
1390 if (retval != ERROR_OK)
1391 return retval;
1392 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1393 brp_list[brp_i].control,
1394 brp_list[brp_i].value);
1395 } else if (breakpoint->type == BKPT_SOFT) {
1396 uint8_t code[4];
1397 if (breakpoint->length == 2)
1398 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1399 else
1400 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1401 retval = target->type->read_memory(target,
1402 breakpoint->address & 0xFFFFFFFE,
1403 breakpoint->length, 1,
1404 breakpoint->orig_instr);
1405 if (retval != ERROR_OK)
1406 return retval;
1407 retval = target_write_memory(target,
1408 breakpoint->address & 0xFFFFFFFE,
1409 breakpoint->length, 1, code);
1410 if (retval != ERROR_OK)
1411 return retval;
1412 breakpoint->set = 0x11; /* Any nice value but 0 */
1413 }
1414
1415 return ERROR_OK;
1416 }
1417
1418 static int cortex_a8_set_context_breakpoint(struct target *target,
1419 struct breakpoint *breakpoint, uint8_t matchmode)
1420 {
1421 int retval = ERROR_FAIL;
1422 int brp_i = 0;
1423 uint32_t control;
1424 uint8_t byte_addr_select = 0x0F;
1425 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1426 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1427 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1428
1429 if (breakpoint->set) {
1430 LOG_WARNING("breakpoint already set");
1431 return retval;
1432 }
1433 /*check available context BRPs*/
1434 while ((brp_i < cortex_a8->brp_num) && (brp_list[brp_i].used ||
1435 (brp_list[brp_i].type != BRP_CONTEXT)))
1436 brp_i++;
1437
1438 if (brp_i >= cortex_a8->brp_num) {
1439 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1440 return ERROR_FAIL;
1441 }
1442
1443 breakpoint->set = brp_i + 1;
1444 control = ((matchmode & 0x7) << 20)
1445 | (byte_addr_select << 5)
1446 | (3 << 1) | 1;
1447 brp_list[brp_i].used = 1;
1448 brp_list[brp_i].value = (breakpoint->asid);
1449 brp_list[brp_i].control = control;
1450 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1451 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1452 brp_list[brp_i].value);
1453 if (retval != ERROR_OK)
1454 return retval;
1455 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1456 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1457 brp_list[brp_i].control);
1458 if (retval != ERROR_OK)
1459 return retval;
1460 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1461 brp_list[brp_i].control,
1462 brp_list[brp_i].value);
1463 return ERROR_OK;
1464
1465 }
1466
1467 static int cortex_a8_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1468 {
1469 int retval = ERROR_FAIL;
1470 int brp_1 = 0; /* holds the contextID pair */
1471 int brp_2 = 0; /* holds the IVA pair */
1472 uint32_t control_CTX, control_IVA;
1473 uint8_t CTX_byte_addr_select = 0x0F;
1474 uint8_t IVA_byte_addr_select = 0x0F;
1475 uint8_t CTX_machmode = 0x03;
1476 uint8_t IVA_machmode = 0x01;
1477 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1478 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1479 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1480
1481 if (breakpoint->set) {
1482 LOG_WARNING("breakpoint already set");
1483 return retval;
1484 }
1485 /*check available context BRPs*/
1486 while ((brp_1 < cortex_a8->brp_num) && (brp_list[brp_1].used ||
1487 (brp_list[brp_1].type != BRP_CONTEXT)))
1488 brp_1++;
1489
1490 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1491 if (brp_1 >= cortex_a8->brp_num) {
1492 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1493 return ERROR_FAIL;
1494 }
1495
1496 while ((brp_2 < cortex_a8->brp_num) && (brp_list[brp_2].used ||
1497 (brp_list[brp_2].type != BRP_NORMAL)))
1498 brp_2++;
1499
1500 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1501 if (brp_2 >= cortex_a8->brp_num) {
1502 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1503 return ERROR_FAIL;
1504 }
1505
1506 breakpoint->set = brp_1 + 1;
1507 breakpoint->linked_BRP = brp_2;
1508 control_CTX = ((CTX_machmode & 0x7) << 20)
1509 | (brp_2 << 16)
1510 | (0 << 14)
1511 | (CTX_byte_addr_select << 5)
1512 | (3 << 1) | 1;
1513 brp_list[brp_1].used = 1;
1514 brp_list[brp_1].value = (breakpoint->asid);
1515 brp_list[brp_1].control = control_CTX;
1516 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1517 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1518 brp_list[brp_1].value);
1519 if (retval != ERROR_OK)
1520 return retval;
1521 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1522 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1523 brp_list[brp_1].control);
1524 if (retval != ERROR_OK)
1525 return retval;
1526
1527 control_IVA = ((IVA_machmode & 0x7) << 20)
1528 | (brp_1 << 16)
1529 | (IVA_byte_addr_select << 5)
1530 | (3 << 1) | 1;
1531 brp_list[brp_2].used = 1;
1532 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1533 brp_list[brp_2].control = control_IVA;
1534 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1535 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1536 brp_list[brp_2].value);
1537 if (retval != ERROR_OK)
1538 return retval;
1539 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1540 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1541 brp_list[brp_2].control);
1542 if (retval != ERROR_OK)
1543 return retval;
1544
1545 return ERROR_OK;
1546 }
1547
1548 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1549 {
1550 int retval;
1551 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1552 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1553 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1554
1555 if (!breakpoint->set) {
1556 LOG_WARNING("breakpoint not set");
1557 return ERROR_OK;
1558 }
1559
1560 if (breakpoint->type == BKPT_HARD) {
1561 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1562 int brp_i = breakpoint->set - 1;
1563 int brp_j = breakpoint->linked_BRP;
1564 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num)) {
1565 LOG_DEBUG("Invalid BRP number in breakpoint");
1566 return ERROR_OK;
1567 }
1568 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1569 brp_list[brp_i].control, brp_list[brp_i].value);
1570 brp_list[brp_i].used = 0;
1571 brp_list[brp_i].value = 0;
1572 brp_list[brp_i].control = 0;
1573 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1574 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1575 brp_list[brp_i].control);
1576 if (retval != ERROR_OK)
1577 return retval;
1578 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1579 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1580 brp_list[brp_i].value);
1581 if (retval != ERROR_OK)
1582 return retval;
1583 if ((brp_j < 0) || (brp_j >= cortex_a8->brp_num)) {
1584 LOG_DEBUG("Invalid BRP number in breakpoint");
1585 return ERROR_OK;
1586 }
1587 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1588 brp_list[brp_j].control, brp_list[brp_j].value);
1589 brp_list[brp_j].used = 0;
1590 brp_list[brp_j].value = 0;
1591 brp_list[brp_j].control = 0;
1592 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1593 + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
1594 brp_list[brp_j].control);
1595 if (retval != ERROR_OK)
1596 return retval;
1597 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1598 + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
1599 brp_list[brp_j].value);
1600 if (retval != ERROR_OK)
1601 return retval;
1602 breakpoint->linked_BRP = 0;
1603 breakpoint->set = 0;
1604 return ERROR_OK;
1605
1606 } else {
1607 int brp_i = breakpoint->set - 1;
1608 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num)) {
1609 LOG_DEBUG("Invalid BRP number in breakpoint");
1610 return ERROR_OK;
1611 }
1612 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1613 brp_list[brp_i].control, brp_list[brp_i].value);
1614 brp_list[brp_i].used = 0;
1615 brp_list[brp_i].value = 0;
1616 brp_list[brp_i].control = 0;
1617 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1618 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1619 brp_list[brp_i].control);
1620 if (retval != ERROR_OK)
1621 return retval;
1622 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1623 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1624 brp_list[brp_i].value);
1625 if (retval != ERROR_OK)
1626 return retval;
1627 breakpoint->set = 0;
1628 return ERROR_OK;
1629 }
1630 } else {
1631 /* restore original instruction (kept in target endianness) */
1632 if (breakpoint->length == 4) {
1633 retval = target_write_memory(target,
1634 breakpoint->address & 0xFFFFFFFE,
1635 4, 1, breakpoint->orig_instr);
1636 if (retval != ERROR_OK)
1637 return retval;
1638 } else {
1639 retval = target_write_memory(target,
1640 breakpoint->address & 0xFFFFFFFE,
1641 2, 1, breakpoint->orig_instr);
1642 if (retval != ERROR_OK)
1643 return retval;
1644 }
1645 }
1646 breakpoint->set = 0;
1647
1648 return ERROR_OK;
1649 }
1650
1651 static int cortex_a8_add_breakpoint(struct target *target,
1652 struct breakpoint *breakpoint)
1653 {
1654 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1655
1656 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1)) {
1657 LOG_INFO("no hardware breakpoint available");
1658 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1659 }
1660
1661 if (breakpoint->type == BKPT_HARD)
1662 cortex_a8->brp_num_available--;
1663
1664 return cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1665 }
1666
1667 static int cortex_a8_add_context_breakpoint(struct target *target,
1668 struct breakpoint *breakpoint)
1669 {
1670 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1671
1672 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1)) {
1673 LOG_INFO("no hardware breakpoint available");
1674 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1675 }
1676
1677 if (breakpoint->type == BKPT_HARD)
1678 cortex_a8->brp_num_available--;
1679
1680 return cortex_a8_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1681 }
1682
1683 static int cortex_a8_add_hybrid_breakpoint(struct target *target,
1684 struct breakpoint *breakpoint)
1685 {
1686 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1687
1688 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1)) {
1689 LOG_INFO("no hardware breakpoint available");
1690 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1691 }
1692
1693 if (breakpoint->type == BKPT_HARD)
1694 cortex_a8->brp_num_available--;
1695
1696 return cortex_a8_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1697 }
1698
1699
1700 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1701 {
1702 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1703
1704 #if 0
1705 /* It is perfectly possible to remove breakpoints while the target is running */
1706 if (target->state != TARGET_HALTED) {
1707 LOG_WARNING("target not halted");
1708 return ERROR_TARGET_NOT_HALTED;
1709 }
1710 #endif
1711
1712 if (breakpoint->set) {
1713 cortex_a8_unset_breakpoint(target, breakpoint);
1714 if (breakpoint->type == BKPT_HARD)
1715 cortex_a8->brp_num_available++;
1716 }
1717
1718
1719 return ERROR_OK;
1720 }
1721
1722 /*
1723 * Cortex-A8 Reset functions
1724 */
1725
1726 static int cortex_a8_assert_reset(struct target *target)
1727 {
1728 struct armv7a_common *armv7a = target_to_armv7a(target);
1729
1730 LOG_DEBUG(" ");
1731
1732 /* FIXME when halt is requested, make it work somehow... */
1733
1734 /* Issue some kind of warm reset. */
1735 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1736 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1737 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1738 /* REVISIT handle "pulls" cases, if there's
1739 * hardware that needs them to work.
1740 */
1741 jtag_add_reset(0, 1);
1742 } else {
1743 LOG_ERROR("%s: how to reset?", target_name(target));
1744 return ERROR_FAIL;
1745 }
1746
1747 /* registers are now invalid */
1748 register_cache_invalidate(armv7a->arm.core_cache);
1749
1750 target->state = TARGET_RESET;
1751
1752 return ERROR_OK;
1753 }
1754
1755 static int cortex_a8_deassert_reset(struct target *target)
1756 {
1757 int retval;
1758
1759 LOG_DEBUG(" ");
1760
1761 /* be certain SRST is off */
1762 jtag_add_reset(0, 0);
1763
1764 retval = cortex_a8_poll(target);
1765 if (retval != ERROR_OK)
1766 return retval;
1767
1768 if (target->reset_halt) {
1769 if (target->state != TARGET_HALTED) {
1770 LOG_WARNING("%s: ran after reset and before halt ...",
1771 target_name(target));
1772 retval = target_halt(target);
1773 if (retval != ERROR_OK)
1774 return retval;
1775 }
1776 }
1777
1778 return ERROR_OK;
1779 }
1780
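/*
 * APB-AP write helper: r0 holds the word-aligned address and r1 the data
 * word.  Partial leading/trailing words are handled by read-modify-write:
 * LDR r1, [r0] fetches the word, the affected bytes are patched, and
 * STR r1, [r0], #4 stores it back while post-incrementing the address.
 * r0/r1 are marked dirty so their saved values are restored on resume.
 */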
1781 static int cortex_a8_write_apb_ab_memory(struct target *target,
1782 uint32_t address, uint32_t size,
1783 uint32_t count, const uint8_t *buffer)
1784 {
1785 /* write memory through APB-AP */
1786
1787 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1788 struct armv7a_common *armv7a = target_to_armv7a(target);
1789 struct arm *arm = &armv7a->arm;
1790 int total_bytes = count * size;
1791 int start_byte, nbytes_to_write, i;
1792 struct reg *reg;
1793 union _data {
1794 uint8_t uc_a[4];
1795 uint32_t ui;
1796 } data;
1797
1798 if (target->state != TARGET_HALTED) {
1799 LOG_WARNING("target not halted");
1800 return ERROR_TARGET_NOT_HALTED;
1801 }
1802
1803 reg = arm_reg_current(arm, 0);
1804 reg->dirty = 1;
1805 reg = arm_reg_current(arm, 1);
1806 reg->dirty = 1;
1807
1808 retval = cortex_a8_dap_write_coreregister_u32(target, address & 0xFFFFFFFC, 0);
1809 if (retval != ERROR_OK)
1810 return retval;
1811
1812 start_byte = address & 0x3;
1813
1814 while (total_bytes > 0) {
1815
1816 nbytes_to_write = 4 - start_byte;
1817 if (total_bytes < nbytes_to_write)
1818 nbytes_to_write = total_bytes;
1819
1820 if (nbytes_to_write != 4) {
1821
1822 /* execute instruction LDR r1, [r0] */
1823 retval = cortex_a8_exec_opcode(target, ARMV4_5_LDR(1, 0), NULL);
1824 if (retval != ERROR_OK)
1825 return retval;
1826
1827 retval = cortex_a8_dap_read_coreregister_u32(target, &data.ui, 1);
1828 if (retval != ERROR_OK)
1829 return retval;
1830 }
1831
1832 for (i = 0; i < nbytes_to_write; ++i)
1833 data.uc_a[i + start_byte] = *buffer++;
1834
1835 retval = cortex_a8_dap_write_coreregister_u32(target, data.ui, 1);
1836 if (retval != ERROR_OK)
1837 return retval;
1838
1839 /* execute instruction STR r1, [r0], #4 (0xe4801004) */
1840 retval = cortex_a8_exec_opcode(target, ARMV4_5_STRW_IP(1, 0), NULL);
1841 if (retval != ERROR_OK)
1842 return retval;
1843
1844 total_bytes -= nbytes_to_write;
1845 start_byte = 0;
1846 }
1847
1848 return retval;
1849 }
1850
1851
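/*
 * APB-AP read helper: the core executes LDR r1, [r0], #4 for each word,
 * r1 is read back through the debug register interface, and only the bytes
 * the caller asked for are copied out, so unaligned start addresses and odd
 * byte counts work too.
 */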
1852 static int cortex_a8_read_apb_ab_memory(struct target *target,
1853 uint32_t address, uint32_t size,
1854 uint32_t count, uint8_t *buffer)
1855 {
1856
1857 /* read memory through APB-AP */
1858
1859 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1860 struct armv7a_common *armv7a = target_to_armv7a(target);
1861 struct arm *arm = &armv7a->arm;
1862 int total_bytes = count * size;
1863 int start_byte, nbytes_to_read, i;
1864 struct reg *reg;
1865 union _data {
1866 uint8_t uc_a[4];
1867 uint32_t ui;
1868 } data;
1869
1870 if (target->state != TARGET_HALTED) {
1871 LOG_WARNING("target not halted");
1872 return ERROR_TARGET_NOT_HALTED;
1873 }
1874
1875 reg = arm_reg_current(arm, 0);
1876 reg->dirty = 1;
1877 reg = arm_reg_current(arm, 1);
1878 reg->dirty = 1;
1879
1880 retval = cortex_a8_dap_write_coreregister_u32(target, address & 0xFFFFFFFC, 0);
1881 if (retval != ERROR_OK)
1882 return retval;
1883
1884 start_byte = address & 0x3;
1885
1886 while (total_bytes > 0) {
1887
1888 /* execute instruction LDR r1, [r0], #4 (0xe4901004) */
1889 retval = cortex_a8_exec_opcode(target, ARMV4_5_LDRW_IP(1, 0), NULL);
1890 if (retval != ERROR_OK)
1891 return retval;
1892
1893 retval = cortex_a8_dap_read_coreregister_u32(target, &data.ui, 1);
1894 if (retval != ERROR_OK)
1895 return retval;
1896
1897 nbytes_to_read = 4 - start_byte;
1898 if (total_bytes < nbytes_to_read)
1899 nbytes_to_read = total_bytes;
1900
1901 for (i = 0; i < nbytes_to_read; ++i)
1902 *buffer++ = data.uc_a[i + start_byte];
1903
1904 total_bytes -= nbytes_to_read;
1905 start_byte = 0;
1906 }
1907
1908 return retval;
1909 }
1910
1911
1912
1913 /*
1914 * Cortex-A8 Memory access
1915 *
1916 * This is the same as for the Cortex-M3, but we must also use the
1917 * correct AP number for every access.
1918 */
1919
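/*
 * Physical access: with the AHB-AP (swjdp_memoryap) selected the transfer
 * goes directly through mem_ap_sel_read_buf_*; otherwise it goes through
 * the APB-AP and the core itself, with the MMU switched off so the address
 * is interpreted as physical.
 */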
1920 static int cortex_a8_read_phys_memory(struct target *target,
1921 uint32_t address, uint32_t size,
1922 uint32_t count, uint8_t *buffer)
1923 {
1924 struct armv7a_common *armv7a = target_to_armv7a(target);
1925 struct adiv5_dap *swjdp = armv7a->arm.dap;
1926 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1927 uint8_t apsel = swjdp->apsel;
1928 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d",
1929 address, size, count);
1930
1931 if (count && buffer) {
1932
1933 if (apsel == swjdp_memoryap) {
1934
1935 /* read memory through AHB-AP */
1936
1937 switch (size) {
1938 case 4:
1939 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
1940 buffer, 4 * count, address);
1941 break;
1942 case 2:
1943 retval = mem_ap_sel_read_buf_u16(swjdp, swjdp_memoryap,
1944 buffer, 2 * count, address);
1945 break;
1946 case 1:
1947 retval = mem_ap_sel_read_buf_u8(swjdp, swjdp_memoryap,
1948 buffer, count, address);
1949 break;
1950 }
1951 } else {
1952
1953 /* read memory through APB-AP;
1954 * disable the MMU first so the address is treated as physical */
1955 retval = cortex_a8_mmu_modify(target, 0);
1956 if (retval != ERROR_OK)
1957 return retval;
1958 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
1959 }
1960 }
1961 return retval;
1962 }
1963
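/*
 * Virtual reads: when the AHB-AP is selected and the MMU is on, the address
 * is translated to a physical one first; on the APB-AP path the MMU is
 * (re)enabled so the core performs the translation itself.
 */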
1964 static int cortex_a8_read_memory(struct target *target, uint32_t address,
1965 uint32_t size, uint32_t count, uint8_t *buffer)
1966 {
1967 int enabled = 0;
1968 uint32_t virt, phys;
1969 int retval;
1970 struct armv7a_common *armv7a = target_to_armv7a(target);
1971 struct adiv5_dap *swjdp = armv7a->arm.dap;
1972 uint8_t apsel = swjdp->apsel;
1973
1974 /* cortex_a8 handles unaligned memory access */
1975 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
1976 size, count);
1977 if (apsel == swjdp_memoryap) {
1978 retval = cortex_a8_mmu(target, &enabled);
1979 if (retval != ERROR_OK)
1980 return retval;
1981
1982
1983 if (enabled) {
1984 virt = address;
1985 retval = cortex_a8_virt2phys(target, virt, &phys);
1986 if (retval != ERROR_OK)
1987 return retval;
1988
1989 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x",
1990 virt, phys);
1991 address = phys;
1992 }
1993 retval = cortex_a8_read_phys_memory(target, address, size, count, buffer);
1994 } else {
1995 retval = cortex_a8_check_address(target, address);
1996 if (retval != ERROR_OK)
1997 return retval;
1998 /* enable mmu */
1999 retval = cortex_a8_mmu_modify(target, 1);
2000 if (retval != ERROR_OK)
2001 return retval;
2002 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
2003 }
2004 return retval;
2005 }
2006
2007 static int cortex_a8_write_phys_memory(struct target *target,
2008 uint32_t address, uint32_t size,
2009 uint32_t count, const uint8_t *buffer)
2010 {
2011 struct armv7a_common *armv7a = target_to_armv7a(target);
2012 struct adiv5_dap *swjdp = armv7a->arm.dap;
2013 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2014 uint8_t apsel = swjdp->apsel;
2015
2016 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address,
2017 size, count);
2018
2019 if (count && buffer) {
2020
2021 if (apsel == swjdp_memoryap) {
2022
2023 /* write memory through AHB-AP */
2024
2025 switch (size) {
2026 case 4:
2027 retval = mem_ap_sel_write_buf_u32(swjdp, swjdp_memoryap,
2028 buffer, 4 * count, address);
2029 break;
2030 case 2:
2031 retval = mem_ap_sel_write_buf_u16(swjdp, swjdp_memoryap,
2032 buffer, 2 * count, address);
2033 break;
2034 case 1:
2035 retval = mem_ap_sel_write_buf_u8(swjdp, swjdp_memoryap,
2036 buffer, count, address);
2037 break;
2038 }
2039
2040 } else {
2041
2042 /* write memory through APB-AP */
2043 retval = cortex_a8_mmu_modify(target, 0);
2044 if (retval != ERROR_OK)
2045 return retval;
2046 return cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
2047 }
2048 }
2049
2050
2051 /* REVISIT this op is generic ARMv7-A/R stuff */
2052 if (retval == ERROR_OK && target->state == TARGET_HALTED) {
2053 struct arm_dpm *dpm = armv7a->arm.dpm;
2054
2055 retval = dpm->prepare(dpm);
2056 if (retval != ERROR_OK)
2057 return retval;
2058
2059 /* Cache handling will NOT work with the MMU active: the
2060 * wrong addresses would be invalidated!
2061 *
2062 * For both ICache and DCache, walk all cache lines in the
2063 * address range. Cortex-A8 has fixed 64 byte line length.
2064 *
2065 * REVISIT per ARMv7, these may trigger watchpoints ...
2066 */
2067
2068 /* invalidate I-Cache */
2069 if (armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled) {
2070 /* ICIMVAU - Invalidate Cache single entry
2071 * with MVA to PoU
2072 * MCR p15, 0, r0, c7, c5, 1
2073 */
2074 for (uint32_t cacheline = address;
2075 cacheline < address + size * count;
2076 cacheline += 64) {
2077 retval = dpm->instr_write_data_r0(dpm,
2078 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2079 cacheline);
2080 if (retval != ERROR_OK)
2081 return retval;
2082 }
2083 }
2084
2085 /* invalidate D-Cache */
2086 if (armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled) {
2087 /* DCIMVAC - Invalidate data Cache line
2088 * with MVA to PoC
2089 * MCR p15, 0, r0, c7, c6, 1
2090 */
2091 for (uint32_t cacheline = address;
2092 cacheline < address + size * count;
2093 cacheline += 64) {
2094 retval = dpm->instr_write_data_r0(dpm,
2095 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2096 cacheline);
2097 if (retval != ERROR_OK)
2098 return retval;
2099 }
2100 }
2101
2102 /* (void) */ dpm->finish(dpm);
2103 }
2104
2105 return retval;
2106 }
2107
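/*
 * Virtual writes: same address handling as cortex_a8_read_memory, followed
 * by the cache maintenance done in cortex_a8_write_phys_memory.
 */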
2108 static int cortex_a8_write_memory(struct target *target, uint32_t address,
2109 uint32_t size, uint32_t count, const uint8_t *buffer)
2110 {
2111 int enabled = 0;
2112 uint32_t virt, phys;
2113 int retval;
2114 struct armv7a_common *armv7a = target_to_armv7a(target);
2115 struct adiv5_dap *swjdp = armv7a->arm.dap;
2116 uint8_t apsel = swjdp->apsel;
2117 /* cortex_a8 handles unaligned memory access */
2118 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
2119 size, count);
2120 if (apsel == swjdp_memoryap) {
2121
2122 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size,
2123 count);
2124 retval = cortex_a8_mmu(target, &enabled);
2125 if (retval != ERROR_OK)
2126 return retval;
2127
2128 if (enabled) {
2129 virt = address;
2130 retval = cortex_a8_virt2phys(target, virt, &phys);
2131 if (retval != ERROR_OK)
2132 return retval;
2133 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x",
2134 virt,
2135 phys);
2136 address = phys;
2137 }
2138
2139 retval = cortex_a8_write_phys_memory(target, address, size,
2140 count, buffer);
2141 } else {
2142 retval = cortex_a8_check_address(target, address);
2143 if (retval != ERROR_OK)
2144 return retval;
2145 /* enable mmu */
2146 retval = cortex_a8_mmu_modify(target, 1);
2147 if (retval != ERROR_OK)
2148 return retval;
2149 retval = cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
2150 }
2151 return retval;
2152 }
2153
2154 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
2155 uint32_t count, const uint8_t *buffer)
2156 {
2157 return cortex_a8_write_memory(target, address, 4, count, buffer);
2158 }
2159
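/*
 * Periodic DCC poll for the target_request protocol: while DSCR reports
 * DTRTX full, read the pending word from CPUDBG_DTRTX and hand it to
 * target_request() for decoding (debug messages etc.).
 */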
2160 static int cortex_a8_handle_target_request(void *priv)
2161 {
2162 struct target *target = priv;
2163 struct armv7a_common *armv7a = target_to_armv7a(target);
2164 struct adiv5_dap *swjdp = armv7a->arm.dap;
2165 int retval;
2166
2167 if (!target_was_examined(target))
2168 return ERROR_OK;
2169 if (!target->dbg_msg_enabled)
2170 return ERROR_OK;
2171
2172 if (target->state == TARGET_RUNNING) {
2173 uint32_t request;
2174 uint32_t dscr;
2175 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2176 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2177
2178 /* check if we have data */
2179 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2180 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2181 armv7a->debug_base + CPUDBG_DTRTX, &request);
2182 if (retval == ERROR_OK) {
2183 target_request(target, request);
2184 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2185 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2186 }
2187 }
2188 }
2189
2190 return ERROR_OK;
2191 }
2192
2193 /*
2194 * Cortex-A8 target information and configuration
2195 */
2196
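/*
 * First-time examination: initialize the DAP, locate the core's debug base
 * (from the ROM table unless target->dbgbase was set explicitly), read the
 * ID registers, set up the DPM and build the breakpoint register pair list.
 */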
2197 static int cortex_a8_examine_first(struct target *target)
2198 {
2199 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2200 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2201 struct adiv5_dap *swjdp = armv7a->arm.dap;
2202 int i;
2203 int retval = ERROR_OK;
2204 uint32_t didr, ctypr, ttypr, cpuid;
2205
2206 /* Instead of doing one extra read to ensure the DAP is configured,
2207 * we call ahbap_debugport_init(swjdp) here
2208 */
2209 retval = ahbap_debugport_init(swjdp);
2210 if (retval != ERROR_OK)
2211 return retval;
2212
2213 if (!target->dbgbase_set) {
2214 uint32_t dbgbase;
2215 /* Get ROM Table base */
2216 uint32_t apid;
2217 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
2218 if (retval != ERROR_OK)
2219 return retval;
2220 /* Lookup 0x15 -- Processor DAP */
2221 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
2222 &armv7a->debug_base);
2223 if (retval != ERROR_OK)
2224 return retval;
2225 } else
2226 armv7a->debug_base = target->dbgbase;
2227
2233 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2234 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2235 if (retval != ERROR_OK) {
2236 LOG_DEBUG("Examine %s failed", "CPUID");
2237 return retval;
2238 }
2239
2240 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2241 armv7a->debug_base + CPUDBG_CTYPR, &ctypr);
2242 if (retval != ERROR_OK) {
2243 LOG_DEBUG("Examine %s failed", "CTYPR");
2244 return retval;
2245 }
2246
2247 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2248 armv7a->debug_base + CPUDBG_TTYPR, &ttypr);
2249 if (retval != ERROR_OK) {
2250 LOG_DEBUG("Examine %s failed", "TTYPR");
2251 return retval;
2252 }
2253
2254 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2255 armv7a->debug_base + CPUDBG_DIDR, &didr);
2256 if (retval != ERROR_OK) {
2257 LOG_DEBUG("Examine %s failed", "DIDR");
2258 return retval;
2259 }
2260
2261 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2262 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2263 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2264 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2265
2266 armv7a->arm.core_type = ARM_MODE_MON;
2267 retval = cortex_a8_dpm_setup(cortex_a8, didr);
2268 if (retval != ERROR_OK)
2269 return retval;
2270
2271 /* Setup Breakpoint Register Pairs */
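/* DBGDIDR[27:24] holds the number of breakpoint comparators minus one,
 * DBGDIDR[23:20] the number of context-matching comparators minus one. */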
2272 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
2273 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2274 cortex_a8->brp_num_available = cortex_a8->brp_num;
2275 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
2276 /* cortex_a8->brb_enabled = ????; */
2277 for (i = 0; i < cortex_a8->brp_num; i++) {
2278 cortex_a8->brp_list[i].used = 0;
2279 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
2280 cortex_a8->brp_list[i].type = BRP_NORMAL;
2281 else
2282 cortex_a8->brp_list[i].type = BRP_CONTEXT;
2283 cortex_a8->brp_list[i].value = 0;
2284 cortex_a8->brp_list[i].control = 0;
2285 cortex_a8->brp_list[i].BRPn = i;
2286 }
2287
2288 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
2289
2290 target_set_examined(target);
2291 return ERROR_OK;
2292 }
2293
2294 static int cortex_a8_examine(struct target *target)
2295 {
2296 int retval = ERROR_OK;
2297
2298 /* don't re-probe hardware after each reset */
2299 if (!target_was_examined(target))
2300 retval = cortex_a8_examine_first(target);
2301
2302 /* Configure core debug access */
2303 if (retval == ERROR_OK)
2304 retval = cortex_a8_init_debug_access(target);
2305
2306 return retval;
2307 }
2308
2309 /*
2310 * Cortex-A8 target creation and initialization
2311 */
2312
2313 static int cortex_a8_init_target(struct command_context *cmd_ctx,
2314 struct target *target)
2315 {
2316 /* examine_first() does a bunch of this */
2317 return ERROR_OK;
2318 }
2319
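/*
 * Per-target setup: wire the ARMv7-A glue to this DAP (creating one for the
 * tap if it does not have a DAP yet), install the cortex_a8 debug-entry and
 * context hooks, and register a periodic timer callback that services
 * target_request messages.
 */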
2320 static int cortex_a8_init_arch_info(struct target *target,
2321 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
2322 {
2323 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2324 struct adiv5_dap *dap = &armv7a->dap;
2325
2326 armv7a->arm.dap = dap;
2327
2328 /* Setup struct cortex_a8_common */
2329 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
2330 /* tap has no dap initialized */
2331 if (!tap->dap) {
2332 armv7a->arm.dap = dap;
2333 /* Setup struct cortex_a8_common */
2334
2335 /* prepare JTAG information for the new target */
2336 cortex_a8->jtag_info.tap = tap;
2337 cortex_a8->jtag_info.scann_size = 4;
2338
2339 /* Leave (only) generic DAP stuff for debugport_init() */
2340 dap->jtag_info = &cortex_a8->jtag_info;
2341
2342 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
2343 dap->tar_autoincr_block = (1 << 10);
2344 dap->memaccess_tck = 80;
2345 tap->dap = dap;
2346 } else
2347 armv7a->arm.dap = tap->dap;
2348
2349 cortex_a8->fast_reg_read = 0;
2350
2351 /* register arch-specific functions */
2352 armv7a->examine_debug_reason = NULL;
2353
2354 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
2355
2356 armv7a->pre_restore_context = NULL;
2357
2358 armv7a->armv7a_mmu.read_physical_memory = cortex_a8_read_phys_memory;
2359
2360
2361 /* arm7_9->handle_target_request = cortex_a8_handle_target_request; */
2362
2363 /* REVISIT v7a setup should be in a v7a-specific routine */
2364 armv7a_init_arch_info(target, armv7a);
2365 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
2366
2367 return ERROR_OK;
2368 }
2369
2370 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
2371 {
2372 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
2373
2374 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
2375 }
2376
2377
2378
2379 static int cortex_a8_mmu(struct target *target, int *enabled)
2380 {
2381 if (target->state != TARGET_HALTED) {
2382 LOG_ERROR("%s: target not halted", __func__);
2383 return ERROR_TARGET_INVALID;
2384 }
2385
2386 *enabled = target_to_cortex_a8(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2387 return ERROR_OK;
2388 }
2389
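/*
 * Address translation: with swjdp_memoryap selected the translation is done
 * by armv7a_mmu_translate_va(); otherwise the MMU is enabled first and
 * armv7a_mmu_translate_va_pa() is used, which needs the MMU on to return a
 * correct result.
 */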
2390 static int cortex_a8_virt2phys(struct target *target,
2391 uint32_t virt, uint32_t *phys)
2392 {
2393 int retval = ERROR_FAIL;
2394 struct armv7a_common *armv7a = target_to_armv7a(target);
2395 struct adiv5_dap *swjdp = armv7a->arm.dap;
2396 uint8_t apsel = swjdp->apsel;
2397 if (apsel == swjdp_memoryap) {
2398 uint32_t ret;
2399 retval = armv7a_mmu_translate_va(target,
2400 virt, &ret);
2401 if (retval != ERROR_OK)
2402 goto done;
2403 *phys = ret;
2404 } else {	/* use this method if swjdp_memoryap is not selected;
2405 * the MMU must be enabled in order to get a correct translation */
2406 retval = cortex_a8_mmu_modify(target, 1);
2407 if (retval != ERROR_OK)
2408 goto done;
2409 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
2410 }
2411 done:
2412 return retval;
2413 }
2414
2415 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
2416 {
2417 struct target *target = get_current_target(CMD_CTX);
2418 struct armv7a_common *armv7a = target_to_armv7a(target);
2419
2420 return armv7a_handle_cache_info_command(CMD_CTX,
2421 &armv7a->armv7a_mmu.armv7a_cache);
2422 }
2423
2424
2425 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
2426 {
2427 struct target *target = get_current_target(CMD_CTX);
2428 if (!target_was_examined(target)) {
2429 LOG_ERROR("target not examined yet");
2430 return ERROR_FAIL;
2431 }
2432
2433 return cortex_a8_init_debug_access(target);
2434 }
2435 COMMAND_HANDLER(cortex_a8_handle_smp_off_command)
2436 {
2437 struct target *target = get_current_target(CMD_CTX);
2438 /* check target is an smp target */
2439 struct target_list *head;
2440 struct target *curr;
2441 head = target->head;
2442 target->smp = 0;
2443 if (head != (struct target_list *)NULL) {
2444 while (head != (struct target_list *)NULL) {
2445 curr = head->target;
2446 curr->smp = 0;
2447 head = head->next;
2448 }
2449 /* point the gdb connection back at this target */
2450 target->gdb_service->target = target;
2451 }
2452 return ERROR_OK;
2453 }
2454
2455 COMMAND_HANDLER(cortex_a8_handle_smp_on_command)
2456 {
2457 struct target *target = get_current_target(CMD_CTX);
2458 struct target_list *head;
2459 struct target *curr;
2460 head = target->head;
2461 if (head != (struct target_list *)NULL) {
2462 target->smp = 1;
2463 while (head != (struct target_list *)NULL) {
2464 curr = head->target;
2465 curr->smp = 1;
2466 head = head->next;
2467 }
2468 }
2469 return ERROR_OK;
2470 }
2471
2472 COMMAND_HANDLER(cortex_a8_handle_smp_gdb_command)
2473 {
2474 struct target *target = get_current_target(CMD_CTX);
2475 int retval = ERROR_OK;
2476 struct target_list *head;
2477 head = target->head;
2478 if (head != (struct target_list *)NULL) {
2479 if (CMD_ARGC == 1) {
2480 int coreid = 0;
2481 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2482 if (ERROR_OK != retval)
2483 return retval;
2484 target->gdb_service->core[1] = coreid;
2485
2486 }
2487 command_print(CMD_CTX, "gdb coreid %d -> %d", target->gdb_service->core[0]
2488 , target->gdb_service->core[1]);
2489 }
2490 return ERROR_OK;
2491 }
2492
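/*
 * Typical use from the OpenOCD command line (illustrative):
 *   cortex_a8 dbginit      ; re-run the debug access initialization
 *   cortex_a8 cache_info   ; print the cache configuration
 *   cortex_a8 smp_gdb 1    ; select core 1 for the gdb connection
 */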
2493 static const struct command_registration cortex_a8_exec_command_handlers[] = {
2494 {
2495 .name = "cache_info",
2496 .handler = cortex_a8_handle_cache_info_command,
2497 .mode = COMMAND_EXEC,
2498 .help = "display information about target caches",
2499 .usage = "",
2500 },
2501 {
2502 .name = "dbginit",
2503 .handler = cortex_a8_handle_dbginit_command,
2504 .mode = COMMAND_EXEC,
2505 .help = "Initialize core debug",
2506 .usage = "",
2507 },
2508 { .name = "smp_off",
2509 .handler = cortex_a8_handle_smp_off_command,
2510 .mode = COMMAND_EXEC,
2511 .help = "Stop smp handling",
2512 .usage = "",},
2513 {
2514 .name = "smp_on",
2515 .handler = cortex_a8_handle_smp_on_command,
2516 .mode = COMMAND_EXEC,
2517 .help = "Restart smp handling",
2518 .usage = "",
2519 },
2520 {
2521 .name = "smp_gdb",
2522 .handler = cortex_a8_handle_smp_gdb_command,
2523 .mode = COMMAND_EXEC,
2524 .help = "display/fix current core played to gdb",
2525 .usage = "",
2526 },
2527
2528
2529 COMMAND_REGISTRATION_DONE
2530 };
2531 static const struct command_registration cortex_a8_command_handlers[] = {
2532 {
2533 .chain = arm_command_handlers,
2534 },
2535 {
2536 .chain = armv7a_command_handlers,
2537 },
2538 {
2539 .name = "cortex_a8",
2540 .mode = COMMAND_ANY,
2541 .help = "Cortex-A8 command group",
2542 .usage = "",
2543 .chain = cortex_a8_exec_command_handlers,
2544 },
2545 COMMAND_REGISTRATION_DONE
2546 };
2547
2548 struct target_type cortexa8_target = {
2549 .name = "cortex_a8",
2550
2551 .poll = cortex_a8_poll,
2552 .arch_state = armv7a_arch_state,
2553
2554 .target_request_data = NULL,
2555
2556 .halt = cortex_a8_halt,
2557 .resume = cortex_a8_resume,
2558 .step = cortex_a8_step,
2559
2560 .assert_reset = cortex_a8_assert_reset,
2561 .deassert_reset = cortex_a8_deassert_reset,
2562 .soft_reset_halt = NULL,
2563
2564 /* REVISIT allow exporting VFP3 registers ... */
2565 .get_gdb_reg_list = arm_get_gdb_reg_list,
2566
2567 .read_memory = cortex_a8_read_memory,
2568 .write_memory = cortex_a8_write_memory,
2569 .bulk_write_memory = cortex_a8_bulk_write_memory,
2570
2571 .checksum_memory = arm_checksum_memory,
2572 .blank_check_memory = arm_blank_check_memory,
2573
2574 .run_algorithm = armv4_5_run_algorithm,
2575
2576 .add_breakpoint = cortex_a8_add_breakpoint,
2577 .add_context_breakpoint = cortex_a8_add_context_breakpoint,
2578 .add_hybrid_breakpoint = cortex_a8_add_hybrid_breakpoint,
2579 .remove_breakpoint = cortex_a8_remove_breakpoint,
2580 .add_watchpoint = NULL,
2581 .remove_watchpoint = NULL,
2582
2583 .commands = cortex_a8_command_handlers,
2584 .target_create = cortex_a8_target_create,
2585 .init_target = cortex_a8_init_target,
2586 .examine = cortex_a8_examine,
2587
2588 .read_phys_memory = cortex_a8_read_phys_memory,
2589 .write_phys_memory = cortex_a8_write_phys_memory,
2590 .mmu = cortex_a8_mmu,
2591 .virt2phys = cortex_a8_virt2phys,
2592 };
