1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
5 * Copyright (C) 2006 by Magnus Lundin *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
17 * This program is free software; you can redistribute it and/or modify *
18 * it under the terms of the GNU General Public License as published by *
19 * the Free Software Foundation; either version 2 of the License, or *
20 * (at your option) any later version. *
22 * This program is distributed in the hope that it will be useful, *
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
25 * GNU General Public License for more details. *
27 * You should have received a copy of the GNU General Public License *
28 * along with this program; if not, write to the *
29 * Free Software Foundation, Inc., *
30 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
32 * Cortex-A8(tm) TRM, ARM DDI 0344H *
33 * Cortex-A9(tm) TRM, ARM DDI 0407F *
35 ***************************************************************************/
40 #include "breakpoints.h"
43 #include "target_request.h"
44 #include "target_type.h"
45 #include "arm_opcodes.h"
46 #include <helper/time_support.h>
48 static int cortex_a8_poll(struct target
*target
);
49 static int cortex_a8_debug_entry(struct target
*target
);
50 static int cortex_a8_restore_context(struct target
*target
, bool bpwp
);
51 static int cortex_a8_set_breakpoint(struct target
*target
,
52 struct breakpoint
*breakpoint
, uint8_t matchmode
);
53 static int cortex_a8_unset_breakpoint(struct target
*target
,
54 struct breakpoint
*breakpoint
);
55 static int cortex_a8_dap_read_coreregister_u32(struct target
*target
,
56 uint32_t *value
, int regnum
);
57 static int cortex_a8_dap_write_coreregister_u32(struct target
*target
,
58 uint32_t value
, int regnum
);
59 static int cortex_a8_mmu(struct target
*target
, int *enabled
);
60 static int cortex_a8_virt2phys(struct target
*target
,
61 uint32_t virt
, uint32_t *phys
);
62 static int cortex_a8_disable_mmu_caches(struct target
*target
, int mmu
,
63 int d_u_cache
, int i_cache
);
64 static int cortex_a8_enable_mmu_caches(struct target
*target
, int mmu
,
65 int d_u_cache
, int i_cache
);
66 static int cortex_a8_get_ttb(struct target
*target
, uint32_t *result
);
/*
 * FIXME do topology discovery using the ROM; don't
 * assume this is an OMAP3.   Also, allow for multiple ARMv7-A
 * cores, with different AP numbering ... don't use a #define
 * for these numbers, use per-core armv7a state.
 */
/* AP numbers on the DAP: AP 0 for memory access, AP 1 for debug registers. */
#define swjdp_memoryap 0
#define swjdp_debugap 1
79 * Cortex-A8 Basic debug access, very low level assumes state is saved
81 static int cortex_a8_init_debug_access(struct target
*target
)
83 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
84 struct adiv5_dap
*swjdp
= armv7a
->armv4_5_common
.dap
;
90 /* Unlocking the debug registers for modification */
91 /* The debugport might be uninitialised so try twice */
92 retval
= mem_ap_sel_write_atomic_u32(swjdp
, swjdp_debugap
,
93 armv7a
->debug_base
+ CPUDBG_LOCKACCESS
, 0xC5ACCE55);
94 if (retval
!= ERROR_OK
)
97 retval
= mem_ap_sel_write_atomic_u32(swjdp
, swjdp_debugap
,
98 armv7a
->debug_base
+ CPUDBG_LOCKACCESS
, 0xC5ACCE55);
99 if (retval
== ERROR_OK
)
101 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
104 if (retval
!= ERROR_OK
)
106 /* Clear Sticky Power Down status Bit in PRSR to enable access to
107 the registers in the Core Power Domain */
108 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
109 armv7a
->debug_base
+ CPUDBG_PRSR
, &dummy
);
110 if (retval
!= ERROR_OK
)
113 /* Enabling of instruction execution in debug mode is done in debug_entry code */
115 /* Resync breakpoint registers */
117 /* Since this is likely called from init or reset, update target state information*/
118 return cortex_a8_poll(target
);
121 /* To reduce needless round-trips, pass in a pointer to the current
122 * DSCR value. Initialize it to zero if you just need to know the
123 * value on return from this function; or DSCR_INSTR_COMP if you
124 * happen to know that no instruction is pending.
126 static int cortex_a8_exec_opcode(struct target
*target
,
127 uint32_t opcode
, uint32_t *dscr_p
)
131 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
132 struct adiv5_dap
*swjdp
= armv7a
->armv4_5_common
.dap
;
134 dscr
= dscr_p
? *dscr_p
: 0;
136 LOG_DEBUG("exec opcode 0x%08" PRIx32
, opcode
);
138 /* Wait for InstrCompl bit to be set */
139 long long then
= timeval_ms();
140 while ((dscr
& DSCR_INSTR_COMP
) == 0)
142 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
143 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
144 if (retval
!= ERROR_OK
)
146 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32
, opcode
);
149 if (timeval_ms() > then
+ 1000)
151 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
156 retval
= mem_ap_sel_write_u32(swjdp
, swjdp_debugap
,
157 armv7a
->debug_base
+ CPUDBG_ITR
, opcode
);
158 if (retval
!= ERROR_OK
)
164 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
165 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
166 if (retval
!= ERROR_OK
)
168 LOG_ERROR("Could not read DSCR register");
171 if (timeval_ms() > then
+ 1000)
173 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
177 while ((dscr
& DSCR_INSTR_COMP
) == 0); /* Wait for InstrCompl bit to be set */
185 /**************************************************************************
186 Read core register with very few exec_opcode, fast but needs work_area.
187 This can cause problems with MMU active.
188 **************************************************************************/
189 static int cortex_a8_read_regs_through_mem(struct target
*target
, uint32_t address
,
192 int retval
= ERROR_OK
;
193 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
194 struct adiv5_dap
*swjdp
= armv7a
->armv4_5_common
.dap
;
196 retval
= cortex_a8_dap_read_coreregister_u32(target
, regfile
, 0);
197 if (retval
!= ERROR_OK
)
199 retval
= cortex_a8_dap_write_coreregister_u32(target
, address
, 0);
200 if (retval
!= ERROR_OK
)
202 retval
= cortex_a8_exec_opcode(target
, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL
);
203 if (retval
!= ERROR_OK
)
206 retval
= mem_ap_sel_read_buf_u32(swjdp
, swjdp_memoryap
,
207 (uint8_t *)(®file
[1]), 4*15, address
);
212 static int cortex_a8_dap_read_coreregister_u32(struct target
*target
,
213 uint32_t *value
, int regnum
)
215 int retval
= ERROR_OK
;
216 uint8_t reg
= regnum
&0xFF;
218 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
219 struct adiv5_dap
*swjdp
= armv7a
->armv4_5_common
.dap
;
226 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
227 retval
= cortex_a8_exec_opcode(target
,
228 ARMV4_5_MCR(14, 0, reg
, 0, 5, 0),
230 if (retval
!= ERROR_OK
)
235 /* "MOV r0, r15"; then move r0 to DCCTX */
236 retval
= cortex_a8_exec_opcode(target
, 0xE1A0000F, &dscr
);
237 if (retval
!= ERROR_OK
)
239 retval
= cortex_a8_exec_opcode(target
,
240 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
242 if (retval
!= ERROR_OK
)
247 /* "MRS r0, CPSR" or "MRS r0, SPSR"
248 * then move r0 to DCCTX
250 retval
= cortex_a8_exec_opcode(target
, ARMV4_5_MRS(0, reg
& 1), &dscr
);
251 if (retval
!= ERROR_OK
)
253 retval
= cortex_a8_exec_opcode(target
,
254 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
256 if (retval
!= ERROR_OK
)
260 /* Wait for DTRRXfull then read DTRRTX */
261 long long then
= timeval_ms();
262 while ((dscr
& DSCR_DTR_TX_FULL
) == 0)
264 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
265 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
266 if (retval
!= ERROR_OK
)
268 if (timeval_ms() > then
+ 1000)
270 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
275 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
276 armv7a
->debug_base
+ CPUDBG_DTRTX
, value
);
277 LOG_DEBUG("read DCC 0x%08" PRIx32
, *value
);
282 static int cortex_a8_dap_write_coreregister_u32(struct target
*target
,
283 uint32_t value
, int regnum
)
285 int retval
= ERROR_OK
;
286 uint8_t Rd
= regnum
&0xFF;
288 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
289 struct adiv5_dap
*swjdp
= armv7a
->armv4_5_common
.dap
;
291 LOG_DEBUG("register %i, value 0x%08" PRIx32
, regnum
, value
);
293 /* Check that DCCRX is not full */
294 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
295 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
296 if (retval
!= ERROR_OK
)
298 if (dscr
& DSCR_DTR_RX_FULL
)
300 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32
, dscr
);
301 /* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
302 retval
= cortex_a8_exec_opcode(target
, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
304 if (retval
!= ERROR_OK
)
311 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
312 LOG_DEBUG("write DCC 0x%08" PRIx32
, value
);
313 retval
= mem_ap_sel_write_u32(swjdp
, swjdp_debugap
,
314 armv7a
->debug_base
+ CPUDBG_DTRRX
, value
);
315 if (retval
!= ERROR_OK
)
320 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
321 retval
= cortex_a8_exec_opcode(target
, ARMV4_5_MRC(14, 0, Rd
, 0, 5, 0),
324 if (retval
!= ERROR_OK
)
329 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
332 retval
= cortex_a8_exec_opcode(target
, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
334 if (retval
!= ERROR_OK
)
336 retval
= cortex_a8_exec_opcode(target
, 0xE1A0F000, &dscr
);
337 if (retval
!= ERROR_OK
)
342 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
343 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
345 retval
= cortex_a8_exec_opcode(target
, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
347 if (retval
!= ERROR_OK
)
349 retval
= cortex_a8_exec_opcode(target
, ARMV4_5_MSR_GP(0, 0xF, Rd
& 1),
351 if (retval
!= ERROR_OK
)
354 /* "Prefetch flush" after modifying execution status in CPSR */
357 retval
= cortex_a8_exec_opcode(target
,
358 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
360 if (retval
!= ERROR_OK
)
368 /* Write to memory mapped registers directly with no cache or mmu handling */
369 static int cortex_a8_dap_write_memap_register_u32(struct target
*target
, uint32_t address
, uint32_t value
)
372 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
373 struct adiv5_dap
*swjdp
= armv7a
->armv4_5_common
.dap
;
375 retval
= mem_ap_sel_write_atomic_u32(swjdp
, swjdp_debugap
, address
, value
);
381 * Cortex-A8 implementation of Debug Programmer's Model
383 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
384 * so there's no need to poll for it before executing an instruction.
386 * NOTE that in several of these cases the "stall" mode might be useful.
387 * It'd let us queue a few operations together... prepare/finish might
388 * be the places to enable/disable that mode.
391 static inline struct cortex_a8_common
*dpm_to_a8(struct arm_dpm
*dpm
)
393 return container_of(dpm
, struct cortex_a8_common
, armv7a_common
.dpm
);
396 static int cortex_a8_write_dcc(struct cortex_a8_common
*a8
, uint32_t data
)
398 LOG_DEBUG("write DCC 0x%08" PRIx32
, data
);
399 return mem_ap_sel_write_u32(a8
->armv7a_common
.armv4_5_common
.dap
,
400 swjdp_debugap
,a8
->armv7a_common
.debug_base
+ CPUDBG_DTRRX
, data
);
403 static int cortex_a8_read_dcc(struct cortex_a8_common
*a8
, uint32_t *data
,
406 struct adiv5_dap
*swjdp
= a8
->armv7a_common
.armv4_5_common
.dap
;
407 uint32_t dscr
= DSCR_INSTR_COMP
;
413 /* Wait for DTRRXfull */
414 long long then
= timeval_ms();
415 while ((dscr
& DSCR_DTR_TX_FULL
) == 0) {
416 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
417 a8
->armv7a_common
.debug_base
+ CPUDBG_DSCR
,
419 if (retval
!= ERROR_OK
)
421 if (timeval_ms() > then
+ 1000)
423 LOG_ERROR("Timeout waiting for read dcc");
428 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
429 a8
->armv7a_common
.debug_base
+ CPUDBG_DTRTX
, data
);
430 if (retval
!= ERROR_OK
)
432 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
440 static int cortex_a8_dpm_prepare(struct arm_dpm
*dpm
)
442 struct cortex_a8_common
*a8
= dpm_to_a8(dpm
);
443 struct adiv5_dap
*swjdp
= a8
->armv7a_common
.armv4_5_common
.dap
;
447 /* set up invariant: INSTR_COMP is set after ever DPM operation */
448 long long then
= timeval_ms();
451 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
452 a8
->armv7a_common
.debug_base
+ CPUDBG_DSCR
,
454 if (retval
!= ERROR_OK
)
456 if ((dscr
& DSCR_INSTR_COMP
) != 0)
458 if (timeval_ms() > then
+ 1000)
460 LOG_ERROR("Timeout waiting for dpm prepare");
465 /* this "should never happen" ... */
466 if (dscr
& DSCR_DTR_RX_FULL
) {
467 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32
, dscr
);
469 retval
= cortex_a8_exec_opcode(
470 a8
->armv7a_common
.armv4_5_common
.target
,
471 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
473 if (retval
!= ERROR_OK
)
480 static int cortex_a8_dpm_finish(struct arm_dpm
*dpm
)
482 /* REVISIT what could be done here? */
486 static int cortex_a8_instr_write_data_dcc(struct arm_dpm
*dpm
,
487 uint32_t opcode
, uint32_t data
)
489 struct cortex_a8_common
*a8
= dpm_to_a8(dpm
);
491 uint32_t dscr
= DSCR_INSTR_COMP
;
493 retval
= cortex_a8_write_dcc(a8
, data
);
494 if (retval
!= ERROR_OK
)
497 return cortex_a8_exec_opcode(
498 a8
->armv7a_common
.armv4_5_common
.target
,
503 static int cortex_a8_instr_write_data_r0(struct arm_dpm
*dpm
,
504 uint32_t opcode
, uint32_t data
)
506 struct cortex_a8_common
*a8
= dpm_to_a8(dpm
);
507 uint32_t dscr
= DSCR_INSTR_COMP
;
510 retval
= cortex_a8_write_dcc(a8
, data
);
511 if (retval
!= ERROR_OK
)
514 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
515 retval
= cortex_a8_exec_opcode(
516 a8
->armv7a_common
.armv4_5_common
.target
,
517 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
519 if (retval
!= ERROR_OK
)
522 /* then the opcode, taking data from R0 */
523 retval
= cortex_a8_exec_opcode(
524 a8
->armv7a_common
.armv4_5_common
.target
,
531 static int cortex_a8_instr_cpsr_sync(struct arm_dpm
*dpm
)
533 struct target
*target
= dpm
->arm
->target
;
534 uint32_t dscr
= DSCR_INSTR_COMP
;
536 /* "Prefetch flush" after modifying execution status in CPSR */
537 return cortex_a8_exec_opcode(target
,
538 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
542 static int cortex_a8_instr_read_data_dcc(struct arm_dpm
*dpm
,
543 uint32_t opcode
, uint32_t *data
)
545 struct cortex_a8_common
*a8
= dpm_to_a8(dpm
);
547 uint32_t dscr
= DSCR_INSTR_COMP
;
549 /* the opcode, writing data to DCC */
550 retval
= cortex_a8_exec_opcode(
551 a8
->armv7a_common
.armv4_5_common
.target
,
554 if (retval
!= ERROR_OK
)
557 return cortex_a8_read_dcc(a8
, data
, &dscr
);
561 static int cortex_a8_instr_read_data_r0(struct arm_dpm
*dpm
,
562 uint32_t opcode
, uint32_t *data
)
564 struct cortex_a8_common
*a8
= dpm_to_a8(dpm
);
565 uint32_t dscr
= DSCR_INSTR_COMP
;
568 /* the opcode, writing data to R0 */
569 retval
= cortex_a8_exec_opcode(
570 a8
->armv7a_common
.armv4_5_common
.target
,
573 if (retval
!= ERROR_OK
)
576 /* write R0 to DCC */
577 retval
= cortex_a8_exec_opcode(
578 a8
->armv7a_common
.armv4_5_common
.target
,
579 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
581 if (retval
!= ERROR_OK
)
584 return cortex_a8_read_dcc(a8
, data
, &dscr
);
587 static int cortex_a8_bpwp_enable(struct arm_dpm
*dpm
, unsigned index_t
,
588 uint32_t addr
, uint32_t control
)
590 struct cortex_a8_common
*a8
= dpm_to_a8(dpm
);
591 uint32_t vr
= a8
->armv7a_common
.debug_base
;
592 uint32_t cr
= a8
->armv7a_common
.debug_base
;
596 case 0 ... 15: /* breakpoints */
597 vr
+= CPUDBG_BVR_BASE
;
598 cr
+= CPUDBG_BCR_BASE
;
600 case 16 ... 31: /* watchpoints */
601 vr
+= CPUDBG_WVR_BASE
;
602 cr
+= CPUDBG_WCR_BASE
;
611 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
612 (unsigned) vr
, (unsigned) cr
);
614 retval
= cortex_a8_dap_write_memap_register_u32(dpm
->arm
->target
,
616 if (retval
!= ERROR_OK
)
618 retval
= cortex_a8_dap_write_memap_register_u32(dpm
->arm
->target
,
623 static int cortex_a8_bpwp_disable(struct arm_dpm
*dpm
, unsigned index_t
)
625 struct cortex_a8_common
*a8
= dpm_to_a8(dpm
);
630 cr
= a8
->armv7a_common
.debug_base
+ CPUDBG_BCR_BASE
;
633 cr
= a8
->armv7a_common
.debug_base
+ CPUDBG_WCR_BASE
;
641 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr
);
643 /* clear control register */
644 return cortex_a8_dap_write_memap_register_u32(dpm
->arm
->target
, cr
, 0);
647 static int cortex_a8_dpm_setup(struct cortex_a8_common
*a8
, uint32_t didr
)
649 struct arm_dpm
*dpm
= &a8
->armv7a_common
.dpm
;
652 dpm
->arm
= &a8
->armv7a_common
.armv4_5_common
;
655 dpm
->prepare
= cortex_a8_dpm_prepare
;
656 dpm
->finish
= cortex_a8_dpm_finish
;
658 dpm
->instr_write_data_dcc
= cortex_a8_instr_write_data_dcc
;
659 dpm
->instr_write_data_r0
= cortex_a8_instr_write_data_r0
;
660 dpm
->instr_cpsr_sync
= cortex_a8_instr_cpsr_sync
;
662 dpm
->instr_read_data_dcc
= cortex_a8_instr_read_data_dcc
;
663 dpm
->instr_read_data_r0
= cortex_a8_instr_read_data_r0
;
665 dpm
->bpwp_enable
= cortex_a8_bpwp_enable
;
666 dpm
->bpwp_disable
= cortex_a8_bpwp_disable
;
668 retval
= arm_dpm_setup(dpm
);
669 if (retval
== ERROR_OK
)
670 retval
= arm_dpm_initialize(dpm
);
677 * Cortex-A8 Run control
680 static int cortex_a8_poll(struct target
*target
)
682 int retval
= ERROR_OK
;
684 struct cortex_a8_common
*cortex_a8
= target_to_cortex_a8(target
);
685 struct armv7a_common
*armv7a
= &cortex_a8
->armv7a_common
;
686 struct adiv5_dap
*swjdp
= armv7a
->armv4_5_common
.dap
;
687 enum target_state prev_target_state
= target
->state
;
689 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
690 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
691 if (retval
!= ERROR_OK
)
695 cortex_a8
->cpudbg_dscr
= dscr
;
697 if (DSCR_RUN_MODE(dscr
) == (DSCR_CORE_HALTED
| DSCR_CORE_RESTARTED
))
699 if (prev_target_state
!= TARGET_HALTED
)
701 /* We have a halting debug event */
702 LOG_DEBUG("Target halted");
703 target
->state
= TARGET_HALTED
;
704 if ((prev_target_state
== TARGET_RUNNING
)
705 || (prev_target_state
== TARGET_RESET
))
707 retval
= cortex_a8_debug_entry(target
);
708 if (retval
!= ERROR_OK
)
711 target_call_event_callbacks(target
,
712 TARGET_EVENT_HALTED
);
714 if (prev_target_state
== TARGET_DEBUG_RUNNING
)
718 retval
= cortex_a8_debug_entry(target
);
719 if (retval
!= ERROR_OK
)
722 target_call_event_callbacks(target
,
723 TARGET_EVENT_DEBUG_HALTED
);
727 else if (DSCR_RUN_MODE(dscr
) == DSCR_CORE_RESTARTED
)
729 target
->state
= TARGET_RUNNING
;
733 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32
, dscr
);
734 target
->state
= TARGET_UNKNOWN
;
740 static int cortex_a8_halt(struct target
*target
)
742 int retval
= ERROR_OK
;
744 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
745 struct adiv5_dap
*swjdp
= armv7a
->armv4_5_common
.dap
;
748 * Tell the core to be halted by writing DRCR with 0x1
749 * and then wait for the core to be halted.
751 retval
= mem_ap_sel_write_atomic_u32(swjdp
, swjdp_debugap
,
752 armv7a
->debug_base
+ CPUDBG_DRCR
, DRCR_HALT
);
753 if (retval
!= ERROR_OK
)
757 * enter halting debug mode
759 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
760 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
761 if (retval
!= ERROR_OK
)
764 retval
= mem_ap_sel_write_atomic_u32(swjdp
, swjdp_debugap
,
765 armv7a
->debug_base
+ CPUDBG_DSCR
, dscr
| DSCR_HALT_DBG_MODE
);
766 if (retval
!= ERROR_OK
)
769 long long then
= timeval_ms();
772 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
773 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
774 if (retval
!= ERROR_OK
)
776 if ((dscr
& DSCR_CORE_HALTED
) != 0)
780 if (timeval_ms() > then
+ 1000)
782 LOG_ERROR("Timeout waiting for halt");
787 target
->debug_reason
= DBG_REASON_DBGRQ
;
792 static int cortex_a8_resume(struct target
*target
, int current
,
793 uint32_t address
, int handle_breakpoints
, int debug_execution
)
795 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
796 struct arm
*armv4_5
= &armv7a
->armv4_5_common
;
797 struct adiv5_dap
*swjdp
= armv7a
->armv4_5_common
.dap
;
800 // struct breakpoint *breakpoint = NULL;
801 uint32_t resume_pc
, dscr
;
803 if (!debug_execution
)
804 target_free_all_working_areas(target
);
809 /* Disable interrupts */
810 /* We disable interrupts in the PRIMASK register instead of
811 * masking with C_MASKINTS,
812 * This is probably the same issue as Cortex-M3 Errata 377493:
813 * C_MASKINTS in parallel with disabled interrupts can cause
814 * local faults to not be taken. */
815 buf_set_u32(armv7m
->core_cache
->reg_list
[ARMV7M_PRIMASK
].value
, 0, 32, 1);
816 armv7m
->core_cache
->reg_list
[ARMV7M_PRIMASK
].dirty
= 1;
817 armv7m
->core_cache
->reg_list
[ARMV7M_PRIMASK
].valid
= 1;
819 /* Make sure we are in Thumb mode */
820 buf_set_u32(armv7m
->core_cache
->reg_list
[ARMV7M_xPSR
].value
, 0, 32,
821 buf_get_u32(armv7m
->core_cache
->reg_list
[ARMV7M_xPSR
].value
, 0, 32) | (1 << 24));
822 armv7m
->core_cache
->reg_list
[ARMV7M_xPSR
].dirty
= 1;
823 armv7m
->core_cache
->reg_list
[ARMV7M_xPSR
].valid
= 1;
827 /* current = 1: continue on current pc, otherwise continue at <address> */
828 resume_pc
= buf_get_u32(armv4_5
->pc
->value
, 0, 32);
832 /* Make sure that the Armv7 gdb thumb fixups does not
833 * kill the return address
835 switch (armv4_5
->core_state
)
838 resume_pc
&= 0xFFFFFFFC;
840 case ARM_STATE_THUMB
:
841 case ARM_STATE_THUMB_EE
:
842 /* When the return address is loaded into PC
843 * bit 0 must be 1 to stay in Thumb state
847 case ARM_STATE_JAZELLE
:
848 LOG_ERROR("How do I resume into Jazelle state??");
851 LOG_DEBUG("resume pc = 0x%08" PRIx32
, resume_pc
);
852 buf_set_u32(armv4_5
->pc
->value
, 0, 32, resume_pc
);
853 armv4_5
->pc
->dirty
= 1;
854 armv4_5
->pc
->valid
= 1;
856 retval
= cortex_a8_restore_context(target
, handle_breakpoints
);
857 if (retval
!= ERROR_OK
)
861 /* the front-end may request us not to handle breakpoints */
862 if (handle_breakpoints
)
864 /* Single step past breakpoint at current address */
865 if ((breakpoint
= breakpoint_find(target
, resume_pc
)))
867 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint
->address
);
868 cortex_m3_unset_breakpoint(target
, breakpoint
);
869 cortex_m3_single_step_core(target
);
870 cortex_m3_set_breakpoint(target
, breakpoint
);
877 * Restart core and wait for it to be started. Clear ITRen and sticky
878 * exception flags: see ARMv7 ARM, C5.9.
880 * REVISIT: for single stepping, we probably want to
881 * disable IRQs by default, with optional override...
884 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
885 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
886 if (retval
!= ERROR_OK
)
889 if ((dscr
& DSCR_INSTR_COMP
) == 0)
890 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
892 retval
= mem_ap_sel_write_atomic_u32(swjdp
, swjdp_debugap
,
893 armv7a
->debug_base
+ CPUDBG_DSCR
, dscr
& ~DSCR_ITR_EN
);
894 if (retval
!= ERROR_OK
)
897 retval
= mem_ap_sel_write_atomic_u32(swjdp
, swjdp_debugap
,
898 armv7a
->debug_base
+ CPUDBG_DRCR
, DRCR_RESTART
| DRCR_CLEAR_EXCEPTIONS
);
899 if (retval
!= ERROR_OK
)
902 long long then
= timeval_ms();
905 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
906 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
907 if (retval
!= ERROR_OK
)
909 if ((dscr
& DSCR_CORE_RESTARTED
) != 0)
911 if (timeval_ms() > then
+ 1000)
913 LOG_ERROR("Timeout waiting for resume");
918 target
->debug_reason
= DBG_REASON_NOTHALTED
;
919 target
->state
= TARGET_RUNNING
;
921 /* registers are now invalid */
922 register_cache_invalidate(armv4_5
->core_cache
);
924 if (!debug_execution
)
926 target
->state
= TARGET_RUNNING
;
927 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
928 LOG_DEBUG("target resumed at 0x%" PRIx32
, resume_pc
);
932 target
->state
= TARGET_DEBUG_RUNNING
;
933 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
934 LOG_DEBUG("target debug resumed at 0x%" PRIx32
, resume_pc
);
940 static int cortex_a8_debug_entry(struct target
*target
)
943 uint32_t regfile
[16], cpsr
, dscr
;
944 int retval
= ERROR_OK
;
945 struct working_area
*regfile_working_area
= NULL
;
946 struct cortex_a8_common
*cortex_a8
= target_to_cortex_a8(target
);
947 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
948 struct arm
*armv4_5
= &armv7a
->armv4_5_common
;
949 struct adiv5_dap
*swjdp
= armv7a
->armv4_5_common
.dap
;
952 LOG_DEBUG("dscr = 0x%08" PRIx32
, cortex_a8
->cpudbg_dscr
);
954 /* REVISIT surely we should not re-read DSCR !! */
955 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
956 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
957 if (retval
!= ERROR_OK
)
960 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
961 * imprecise data aborts get discarded by issuing a Data
962 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
965 /* Enable the ITR execution once we are in debug mode */
967 retval
= mem_ap_sel_write_atomic_u32(swjdp
, swjdp_debugap
,
968 armv7a
->debug_base
+ CPUDBG_DSCR
, dscr
);
969 if (retval
!= ERROR_OK
)
972 /* Examine debug reason */
973 arm_dpm_report_dscr(&armv7a
->dpm
, cortex_a8
->cpudbg_dscr
);
975 /* save address of instruction that triggered the watchpoint? */
976 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
979 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
980 armv7a
->debug_base
+ CPUDBG_WFAR
,
982 if (retval
!= ERROR_OK
)
984 arm_dpm_report_wfar(&armv7a
->dpm
, wfar
);
987 /* REVISIT fast_reg_read is never set ... */
989 /* Examine target state and mode */
990 if (cortex_a8
->fast_reg_read
)
991 target_alloc_working_area(target
, 64, ®file_working_area
);
993 /* First load register acessible through core debug port*/
994 if (!regfile_working_area
)
996 retval
= arm_dpm_read_current_registers(&armv7a
->dpm
);
1000 retval
= cortex_a8_read_regs_through_mem(target
,
1001 regfile_working_area
->address
, regfile
);
1003 target_free_working_area(target
, regfile_working_area
);
1004 if (retval
!= ERROR_OK
)
1009 /* read Current PSR */
1010 retval
= cortex_a8_dap_read_coreregister_u32(target
, &cpsr
, 16);
1011 if (retval
!= ERROR_OK
)
1014 LOG_DEBUG("cpsr: %8.8" PRIx32
, cpsr
);
1016 arm_set_cpsr(armv4_5
, cpsr
);
1019 for (i
= 0; i
<= ARM_PC
; i
++)
1021 reg
= arm_reg_current(armv4_5
, i
);
1023 buf_set_u32(reg
->value
, 0, 32, regfile
[i
]);
1028 /* Fixup PC Resume Address */
1029 if (cpsr
& (1 << 5))
1031 // T bit set for Thumb or ThumbEE state
1032 regfile
[ARM_PC
] -= 4;
1037 regfile
[ARM_PC
] -= 8;
1041 buf_set_u32(reg
->value
, 0, 32, regfile
[ARM_PC
]);
1042 reg
->dirty
= reg
->valid
;
1046 /* TODO, Move this */
1047 uint32_t cp15_control_register
, cp15_cacr
, cp15_nacr
;
1048 cortex_a8_read_cp(target
, &cp15_control_register
, 15, 0, 1, 0, 0);
1049 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register
);
1051 cortex_a8_read_cp(target
, &cp15_cacr
, 15, 0, 1, 0, 2);
1052 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr
);
1054 cortex_a8_read_cp(target
, &cp15_nacr
, 15, 0, 1, 1, 2);
1055 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr
);
1058 /* Are we in an exception handler */
1059 // armv4_5->exception_number = 0;
1060 if (armv7a
->post_debug_entry
)
1062 retval
= armv7a
->post_debug_entry(target
);
1063 if (retval
!= ERROR_OK
)
1070 static int cortex_a8_post_debug_entry(struct target
*target
)
1072 struct cortex_a8_common
*cortex_a8
= target_to_cortex_a8(target
);
1073 struct armv7a_common
*armv7a
= &cortex_a8
->armv7a_common
;
1076 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1077 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
1078 0, 0, /* op1, op2 */
1079 1, 0, /* CRn, CRm */
1080 &cortex_a8
->cp15_control_reg
);
1081 if (retval
!= ERROR_OK
)
1083 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32
, cortex_a8
->cp15_control_reg
);
1085 if (armv7a
->armv4_5_mmu
.armv4_5_cache
.ctype
== -1)
1087 uint32_t cache_type_reg
;
1089 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
1090 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
1091 0, 1, /* op1, op2 */
1092 0, 0, /* CRn, CRm */
1094 if (retval
!= ERROR_OK
)
1096 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg
);
1098 /* FIXME the armv4_4 cache info DOES NOT APPLY to Cortex-A8 */
1099 armv4_5_identify_cache(cache_type_reg
,
1100 &armv7a
->armv4_5_mmu
.armv4_5_cache
);
1103 armv7a
->armv4_5_mmu
.mmu_enabled
=
1104 (cortex_a8
->cp15_control_reg
& 0x1U
) ? 1 : 0;
1105 armv7a
->armv4_5_mmu
.armv4_5_cache
.d_u_cache_enabled
=
1106 (cortex_a8
->cp15_control_reg
& 0x4U
) ? 1 : 0;
1107 armv7a
->armv4_5_mmu
.armv4_5_cache
.i_cache_enabled
=
1108 (cortex_a8
->cp15_control_reg
& 0x1000U
) ? 1 : 0;
1113 static int cortex_a8_step(struct target
*target
, int current
, uint32_t address
,
1114 int handle_breakpoints
)
1116 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1117 struct arm
*armv4_5
= &armv7a
->armv4_5_common
;
1118 struct breakpoint
*breakpoint
= NULL
;
1119 struct breakpoint stepbreakpoint
;
1123 if (target
->state
!= TARGET_HALTED
)
1125 LOG_WARNING("target not halted");
1126 return ERROR_TARGET_NOT_HALTED
;
1129 /* current = 1: continue on current pc, otherwise continue at <address> */
1133 buf_set_u32(r
->value
, 0, 32, address
);
1137 address
= buf_get_u32(r
->value
, 0, 32);
1140 /* The front-end may request us not to handle breakpoints.
1141 * But since Cortex-A8 uses breakpoint for single step,
1142 * we MUST handle breakpoints.
1144 handle_breakpoints
= 1;
1145 if (handle_breakpoints
) {
1146 breakpoint
= breakpoint_find(target
, address
);
1148 cortex_a8_unset_breakpoint(target
, breakpoint
);
1151 /* Setup single step breakpoint */
1152 stepbreakpoint
.address
= address
;
1153 stepbreakpoint
.length
= (armv4_5
->core_state
== ARM_STATE_THUMB
)
1155 stepbreakpoint
.type
= BKPT_HARD
;
1156 stepbreakpoint
.set
= 0;
1158 /* Break on IVA mismatch */
1159 cortex_a8_set_breakpoint(target
, &stepbreakpoint
, 0x04);
1161 target
->debug_reason
= DBG_REASON_SINGLESTEP
;
1163 retval
= cortex_a8_resume(target
, 1, address
, 0, 0);
1164 if (retval
!= ERROR_OK
)
1167 long long then
= timeval_ms();
1168 while (target
->state
!= TARGET_HALTED
)
1170 retval
= cortex_a8_poll(target
);
1171 if (retval
!= ERROR_OK
)
1173 if (timeval_ms() > then
+ 1000)
1175 LOG_ERROR("timeout waiting for target halt");
1180 cortex_a8_unset_breakpoint(target
, &stepbreakpoint
);
1182 target
->debug_reason
= DBG_REASON_BREAKPOINT
;
1185 cortex_a8_set_breakpoint(target
, breakpoint
, 0);
1187 if (target
->state
!= TARGET_HALTED
)
1188 LOG_DEBUG("target stepped");
1193 static int cortex_a8_restore_context(struct target
*target
, bool bpwp
)
1195 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1199 if (armv7a
->pre_restore_context
)
1200 armv7a
->pre_restore_context(target
);
1202 return arm_dpm_write_dirty_registers(&armv7a
->dpm
, bpwp
);
1207 * Cortex-A8 Breakpoint and watchpoint functions
1210 /* Setup hardware Breakpoint Register Pair */
1211 static int cortex_a8_set_breakpoint(struct target
*target
,
1212 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1217 uint8_t byte_addr_select
= 0x0F;
1218 struct cortex_a8_common
*cortex_a8
= target_to_cortex_a8(target
);
1219 struct armv7a_common
*armv7a
= &cortex_a8
->armv7a_common
;
1220 struct cortex_a8_brp
* brp_list
= cortex_a8
->brp_list
;
1222 if (breakpoint
->set
)
1224 LOG_WARNING("breakpoint already set");
1228 if (breakpoint
->type
== BKPT_HARD
)
1230 while (brp_list
[brp_i
].used
&& (brp_i
< cortex_a8
->brp_num
))
1232 if (brp_i
>= cortex_a8
->brp_num
)
1234 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1235 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1237 breakpoint
->set
= brp_i
+ 1;
1238 if (breakpoint
->length
== 2)
1240 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1242 control
= ((matchmode
& 0x7) << 20)
1243 | (byte_addr_select
<< 5)
1245 brp_list
[brp_i
].used
= 1;
1246 brp_list
[brp_i
].value
= (breakpoint
->address
& 0xFFFFFFFC);
1247 brp_list
[brp_i
].control
= control
;
1248 retval
= cortex_a8_dap_write_memap_register_u32(target
, armv7a
->debug_base
1249 + CPUDBG_BVR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1250 brp_list
[brp_i
].value
);
1251 if (retval
!= ERROR_OK
)
1253 retval
= cortex_a8_dap_write_memap_register_u32(target
, armv7a
->debug_base
1254 + CPUDBG_BCR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1255 brp_list
[brp_i
].control
);
1256 if (retval
!= ERROR_OK
)
1258 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%0" PRIx32
, brp_i
,
1259 brp_list
[brp_i
].control
,
1260 brp_list
[brp_i
].value
);
1262 else if (breakpoint
->type
== BKPT_SOFT
)
1265 if (breakpoint
->length
== 2)
1267 buf_set_u32(code
, 0, 32, ARMV5_T_BKPT(0x11));
1271 buf_set_u32(code
, 0, 32, ARMV5_BKPT(0x11));
1273 retval
= target
->type
->read_memory(target
,
1274 breakpoint
->address
& 0xFFFFFFFE,
1275 breakpoint
->length
, 1,
1276 breakpoint
->orig_instr
);
1277 if (retval
!= ERROR_OK
)
1279 retval
= target
->type
->write_memory(target
,
1280 breakpoint
->address
& 0xFFFFFFFE,
1281 breakpoint
->length
, 1, code
);
1282 if (retval
!= ERROR_OK
)
1284 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1290 static int cortex_a8_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1293 struct cortex_a8_common
*cortex_a8
= target_to_cortex_a8(target
);
1294 struct armv7a_common
*armv7a
= &cortex_a8
->armv7a_common
;
1295 struct cortex_a8_brp
* brp_list
= cortex_a8
->brp_list
;
1297 if (!breakpoint
->set
)
1299 LOG_WARNING("breakpoint not set");
1303 if (breakpoint
->type
== BKPT_HARD
)
1305 int brp_i
= breakpoint
->set
- 1;
1306 if ((brp_i
< 0) || (brp_i
>= cortex_a8
->brp_num
))
1308 LOG_DEBUG("Invalid BRP number in breakpoint");
1311 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx32
, brp_i
,
1312 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1313 brp_list
[brp_i
].used
= 0;
1314 brp_list
[brp_i
].value
= 0;
1315 brp_list
[brp_i
].control
= 0;
1316 retval
= cortex_a8_dap_write_memap_register_u32(target
, armv7a
->debug_base
1317 + CPUDBG_BCR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1318 brp_list
[brp_i
].control
);
1319 if (retval
!= ERROR_OK
)
1321 retval
= cortex_a8_dap_write_memap_register_u32(target
, armv7a
->debug_base
1322 + CPUDBG_BVR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1323 brp_list
[brp_i
].value
);
1324 if (retval
!= ERROR_OK
)
1329 /* restore original instruction (kept in target endianness) */
1330 if (breakpoint
->length
== 4)
1332 retval
= target
->type
->write_memory(target
,
1333 breakpoint
->address
& 0xFFFFFFFE,
1334 4, 1, breakpoint
->orig_instr
);
1335 if (retval
!= ERROR_OK
)
1340 retval
= target
->type
->write_memory(target
,
1341 breakpoint
->address
& 0xFFFFFFFE,
1342 2, 1, breakpoint
->orig_instr
);
1343 if (retval
!= ERROR_OK
)
1347 breakpoint
->set
= 0;
1352 static int cortex_a8_add_breakpoint(struct target
*target
,
1353 struct breakpoint
*breakpoint
)
1355 struct cortex_a8_common
*cortex_a8
= target_to_cortex_a8(target
);
1357 if ((breakpoint
->type
== BKPT_HARD
) && (cortex_a8
->brp_num_available
< 1))
1359 LOG_INFO("no hardware breakpoint available");
1360 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1363 if (breakpoint
->type
== BKPT_HARD
)
1364 cortex_a8
->brp_num_available
--;
1366 return cortex_a8_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1369 static int cortex_a8_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1371 struct cortex_a8_common
*cortex_a8
= target_to_cortex_a8(target
);
1374 /* It is perfectly possible to remove breakpoints while the target is running */
1375 if (target
->state
!= TARGET_HALTED
)
1377 LOG_WARNING("target not halted");
1378 return ERROR_TARGET_NOT_HALTED
;
1382 if (breakpoint
->set
)
1384 cortex_a8_unset_breakpoint(target
, breakpoint
);
1385 if (breakpoint
->type
== BKPT_HARD
)
1386 cortex_a8
->brp_num_available
++ ;
1396 * Cortex-A8 Reset functions
1399 static int cortex_a8_assert_reset(struct target
*target
)
1401 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1405 /* FIXME when halt is requested, make it work somehow... */
1407 /* Issue some kind of warm reset. */
1408 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
)) {
1409 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1410 } else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1411 /* REVISIT handle "pulls" cases, if there's
1412 * hardware that needs them to work.
1414 jtag_add_reset(0, 1);
1416 LOG_ERROR("%s: how to reset?", target_name(target
));
1420 /* registers are now invalid */
1421 register_cache_invalidate(armv7a
->armv4_5_common
.core_cache
);
1423 target
->state
= TARGET_RESET
;
1428 static int cortex_a8_deassert_reset(struct target
*target
)
1434 /* be certain SRST is off */
1435 jtag_add_reset(0, 0);
1437 retval
= cortex_a8_poll(target
);
1438 if (retval
!= ERROR_OK
)
1441 if (target
->reset_halt
) {
1442 if (target
->state
!= TARGET_HALTED
) {
1443 LOG_WARNING("%s: ran after reset and before halt ...",
1444 target_name(target
));
1445 if ((retval
= target_halt(target
)) != ERROR_OK
)
1454 static int cortex_a8_write_apb_ab_memory(struct target
*target
,
1455 uint32_t address
, uint32_t size
,
1456 uint32_t count
, const uint8_t *buffer
)
1459 /* write memory through APB-AP */
1461 int retval
= ERROR_INVALID_ARGUMENTS
;
1462 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1463 struct arm
*armv4_5
= &armv7a
->armv4_5_common
;
1464 int total_bytes
= count
* size
;
1465 int start_byte
, nbytes_to_write
, i
;
1472 if (target
->state
!= TARGET_HALTED
)
1474 LOG_WARNING("target not halted");
1475 return ERROR_TARGET_NOT_HALTED
;
1478 reg
= arm_reg_current(armv4_5
, 0);
1480 reg
= arm_reg_current(armv4_5
, 1);
1483 retval
= cortex_a8_dap_write_coreregister_u32(target
, address
& 0xFFFFFFFC, 0);
1484 if (retval
!= ERROR_OK
)
1487 start_byte
= address
& 0x3;
1489 while (total_bytes
> 0) {
1491 nbytes_to_write
= 4 - start_byte
;
1492 if (total_bytes
< nbytes_to_write
)
1493 nbytes_to_write
= total_bytes
;
1495 if ( nbytes_to_write
!= 4 ) {
1497 /* execute instruction LDR r1, [r0] */
1498 retval
= cortex_a8_exec_opcode(target
, ARMV4_5_LDR(1, 0), NULL
);
1499 if (retval
!= ERROR_OK
)
1502 retval
= cortex_a8_dap_read_coreregister_u32(target
, &data
.ui
, 1);
1503 if (retval
!= ERROR_OK
)
1507 for (i
= 0; i
< nbytes_to_write
; ++i
)
1508 data
.uc_a
[i
+ start_byte
] = *buffer
++;
1510 retval
= cortex_a8_dap_write_coreregister_u32(target
, data
.ui
, 1);
1511 if (retval
!= ERROR_OK
)
1514 /* execute instruction STRW r1, [r0], 1 (0xe4801004) */
1515 retval
= cortex_a8_exec_opcode(target
, ARMV4_5_STRW_IP(1, 0) , NULL
);
1516 if (retval
!= ERROR_OK
)
1519 total_bytes
-= nbytes_to_write
;
1527 static int cortex_a8_read_apb_ab_memory(struct target
*target
,
1528 uint32_t address
, uint32_t size
,
1529 uint32_t count
, uint8_t *buffer
)
1532 /* read memory through APB-AP */
1534 int retval
= ERROR_INVALID_ARGUMENTS
;
1535 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1536 struct arm
*armv4_5
= &armv7a
->armv4_5_common
;
1537 int total_bytes
= count
* size
;
1538 int start_byte
, nbytes_to_read
, i
;
1545 if (target
->state
!= TARGET_HALTED
)
1547 LOG_WARNING("target not halted");
1548 return ERROR_TARGET_NOT_HALTED
;
1551 reg
= arm_reg_current(armv4_5
, 0);
1553 reg
= arm_reg_current(armv4_5
, 1);
1556 retval
= cortex_a8_dap_write_coreregister_u32(target
, address
& 0xFFFFFFFC, 0);
1557 if (retval
!= ERROR_OK
)
1560 start_byte
= address
& 0x3;
1562 while (total_bytes
> 0) {
1564 /* execute instruction LDRW r1, [r0], 4 (0xe4901004) */
1565 retval
= cortex_a8_exec_opcode(target
, ARMV4_5_LDRW_IP(1, 0), NULL
);
1566 if (retval
!= ERROR_OK
)
1569 retval
= cortex_a8_dap_read_coreregister_u32(target
, &data
.ui
, 1);
1570 if (retval
!= ERROR_OK
)
1573 nbytes_to_read
= 4 - start_byte
;
1574 if (total_bytes
< nbytes_to_read
)
1575 nbytes_to_read
= total_bytes
;
1577 for (i
= 0; i
< nbytes_to_read
; ++i
)
1578 *buffer
++ = data
.uc_a
[i
+ start_byte
];
1580 total_bytes
-= nbytes_to_read
;
1590 * Cortex-A8 Memory access
1592 * This is same Cortex M3 but we must also use the correct
1593 * ap number for every access.
1596 static int cortex_a8_read_phys_memory(struct target
*target
,
1597 uint32_t address
, uint32_t size
,
1598 uint32_t count
, uint8_t *buffer
)
1600 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1601 struct adiv5_dap
*swjdp
= armv7a
->armv4_5_common
.dap
;
1602 int retval
= ERROR_INVALID_ARGUMENTS
;
1603 uint8_t apsel
= swjdp
->apsel
;
1604 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d",
1605 address
, size
, count
);
1607 if (count
&& buffer
) {
1609 if ( apsel
== swjdp_memoryap
) {
1611 /* read memory through AHB-AP */
1615 retval
= mem_ap_sel_read_buf_u32(swjdp
, swjdp_memoryap
,
1616 buffer
, 4 * count
, address
);
1619 retval
= mem_ap_sel_read_buf_u16(swjdp
, swjdp_memoryap
,
1620 buffer
, 2 * count
, address
);
1623 retval
= mem_ap_sel_read_buf_u8(swjdp
, swjdp_memoryap
,
1624 buffer
, count
, address
);
1630 /* read memory through APB-AP */
1633 retval
= cortex_a8_mmu(target
, &enabled
);
1634 if (retval
!= ERROR_OK
)
1639 LOG_WARNING("Reading physical memory through \
1640 APB with MMU enabled is not yet implemented");
1641 return ERROR_TARGET_FAILURE
;
1643 retval
= cortex_a8_read_apb_ab_memory(target
, address
, size
, count
, buffer
);
1649 static int cortex_a8_read_memory(struct target
*target
, uint32_t address
,
1650 uint32_t size
, uint32_t count
, uint8_t *buffer
)
1653 uint32_t virt
, phys
;
1655 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1656 struct adiv5_dap
*swjdp
= armv7a
->armv4_5_common
.dap
;
1657 uint8_t apsel
= swjdp
->apsel
;
1659 /* cortex_a8 handles unaligned memory access */
1660 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address
,
1662 if (apsel
== swjdp_memoryap
) {
1663 retval
= cortex_a8_mmu(target
, &enabled
);
1664 if (retval
!= ERROR_OK
)
1670 retval
= cortex_a8_virt2phys(target
, virt
, &phys
);
1671 if (retval
!= ERROR_OK
)
1674 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x",
1678 retval
= cortex_a8_read_phys_memory(target
, address
, size
, count
, buffer
);
1680 retval
= cortex_a8_read_apb_ab_memory(target
, address
, size
, count
, buffer
);
1685 static int cortex_a8_write_phys_memory(struct target
*target
,
1686 uint32_t address
, uint32_t size
,
1687 uint32_t count
, const uint8_t *buffer
)
1689 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1690 struct adiv5_dap
*swjdp
= armv7a
->armv4_5_common
.dap
;
1691 int retval
= ERROR_INVALID_ARGUMENTS
;
1692 uint8_t apsel
= swjdp
->apsel
;
1694 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address
,
1697 if (count
&& buffer
) {
1699 if ( apsel
== swjdp_memoryap
) {
1701 /* write memory through AHB-AP */
1705 retval
= mem_ap_sel_write_buf_u32(swjdp
, swjdp_memoryap
,
1706 buffer
, 4 * count
, address
);
1709 retval
= mem_ap_sel_write_buf_u16(swjdp
, swjdp_memoryap
,
1710 buffer
, 2 * count
, address
);
1713 retval
= mem_ap_sel_write_buf_u8(swjdp
, swjdp_memoryap
,
1714 buffer
, count
, address
);
1720 /* write memory through APB-AP */
1723 retval
= cortex_a8_mmu(target
, &enabled
);
1724 if (retval
!= ERROR_OK
)
1729 LOG_WARNING("Writing physical memory through APB with MMU" \
1730 "enabled is not yet implemented");
1731 return ERROR_TARGET_FAILURE
;
1733 return cortex_a8_write_apb_ab_memory(target
, address
, size
, count
, buffer
);
1738 /* REVISIT this op is generic ARMv7-A/R stuff */
1739 if (retval
== ERROR_OK
&& target
->state
== TARGET_HALTED
)
1741 struct arm_dpm
*dpm
= armv7a
->armv4_5_common
.dpm
;
1743 retval
= dpm
->prepare(dpm
);
1744 if (retval
!= ERROR_OK
)
1747 /* The Cache handling will NOT work with MMU active, the
1748 * wrong addresses will be invalidated!
1750 * For both ICache and DCache, walk all cache lines in the
1751 * address range. Cortex-A8 has fixed 64 byte line length.
1753 * REVISIT per ARMv7, these may trigger watchpoints ...
1756 /* invalidate I-Cache */
1757 if (armv7a
->armv4_5_mmu
.armv4_5_cache
.i_cache_enabled
)
1759 /* ICIMVAU - Invalidate Cache single entry
1761 * MCR p15, 0, r0, c7, c5, 1
1763 for (uint32_t cacheline
= address
;
1764 cacheline
< address
+ size
* count
;
1766 retval
= dpm
->instr_write_data_r0(dpm
,
1767 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1769 if (retval
!= ERROR_OK
)
1774 /* invalidate D-Cache */
1775 if (armv7a
->armv4_5_mmu
.armv4_5_cache
.d_u_cache_enabled
)
1777 /* DCIMVAC - Invalidate data Cache line
1779 * MCR p15, 0, r0, c7, c6, 1
1781 for (uint32_t cacheline
= address
;
1782 cacheline
< address
+ size
* count
;
1784 retval
= dpm
->instr_write_data_r0(dpm
,
1785 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1787 if (retval
!= ERROR_OK
)
1792 /* (void) */ dpm
->finish(dpm
);
1798 static int cortex_a8_write_memory(struct target
*target
, uint32_t address
,
1799 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
1802 uint32_t virt
, phys
;
1804 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1805 struct adiv5_dap
*swjdp
= armv7a
->armv4_5_common
.dap
;
1806 uint8_t apsel
= swjdp
->apsel
;
1807 /* cortex_a8 handles unaligned memory access */
1808 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address
,
1810 if (apsel
== swjdp_memoryap
) {
1812 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address
, size
, count
);
1813 retval
= cortex_a8_mmu(target
, &enabled
);
1814 if (retval
!= ERROR_OK
)
1820 retval
= cortex_a8_virt2phys(target
, virt
, &phys
);
1821 if (retval
!= ERROR_OK
)
1823 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt
, phys
);
1827 retval
= cortex_a8_write_phys_memory(target
, address
, size
,
1831 retval
= cortex_a8_write_apb_ab_memory(target
, address
, size
, count
, buffer
);
/*
 * target_type bulk_write_memory hook: a bulk write is just a word-sized
 * write of `count` words — delegate to cortex_a8_write_memory.
 */
static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, const uint8_t *buffer)
{
	return cortex_a8_write_memory(target, address, 4, count, buffer);
}
1843 static int cortex_a8_handle_target_request(void *priv
)
1845 struct target
*target
= priv
;
1846 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1847 struct adiv5_dap
*swjdp
= armv7a
->armv4_5_common
.dap
;
1850 if (!target_was_examined(target
))
1852 if (!target
->dbg_msg_enabled
)
1855 if (target
->state
== TARGET_RUNNING
)
1859 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
1860 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
1862 /* check if we have data */
1863 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
==ERROR_OK
))
1865 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
1866 armv7a
->debug_base
+ CPUDBG_DTRTX
, &request
);
1867 if (retval
== ERROR_OK
)
1869 target_request(target
, request
);
1870 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
1871 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
1880 * Cortex-A8 target information and configuration
1883 static int cortex_a8_examine_first(struct target
*target
)
1885 struct cortex_a8_common
*cortex_a8
= target_to_cortex_a8(target
);
1886 struct armv7a_common
*armv7a
= &cortex_a8
->armv7a_common
;
1887 struct adiv5_dap
*swjdp
= armv7a
->armv4_5_common
.dap
;
1889 int retval
= ERROR_OK
;
1890 uint32_t didr
, ctypr
, ttypr
, cpuid
;
1892 /* We do one extra read to ensure DAP is configured,
1893 * we call ahbap_debugport_init(swjdp) instead
1895 retval
= ahbap_debugport_init(swjdp
);
1896 if (retval
!= ERROR_OK
)
1899 if (!target
->dbgbase_set
)
1902 /* Get ROM Table base */
1904 retval
= dap_get_debugbase(swjdp
, 1, &dbgbase
, &apid
);
1905 if (retval
!= ERROR_OK
)
1907 /* Lookup 0x15 -- Processor DAP */
1908 retval
= dap_lookup_cs_component(swjdp
, 1, dbgbase
, 0x15,
1909 &armv7a
->debug_base
);
1910 if (retval
!= ERROR_OK
)
1915 armv7a
->debug_base
= target
->dbgbase
;
1918 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
1919 armv7a
->debug_base
+ CPUDBG_CPUID
, &cpuid
);
1920 if (retval
!= ERROR_OK
)
1923 if ((retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
1924 armv7a
->debug_base
+ CPUDBG_CPUID
, &cpuid
)) != ERROR_OK
)
1926 LOG_DEBUG("Examine %s failed", "CPUID");
1930 if ((retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
1931 armv7a
->debug_base
+ CPUDBG_CTYPR
, &ctypr
)) != ERROR_OK
)
1933 LOG_DEBUG("Examine %s failed", "CTYPR");
1937 if ((retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
1938 armv7a
->debug_base
+ CPUDBG_TTYPR
, &ttypr
)) != ERROR_OK
)
1940 LOG_DEBUG("Examine %s failed", "TTYPR");
1944 if ((retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
1945 armv7a
->debug_base
+ CPUDBG_DIDR
, &didr
)) != ERROR_OK
)
1947 LOG_DEBUG("Examine %s failed", "DIDR");
1951 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
1952 LOG_DEBUG("ctypr = 0x%08" PRIx32
, ctypr
);
1953 LOG_DEBUG("ttypr = 0x%08" PRIx32
, ttypr
);
1954 LOG_DEBUG("didr = 0x%08" PRIx32
, didr
);
1956 armv7a
->armv4_5_common
.core_type
= ARM_MODE_MON
;
1957 retval
= cortex_a8_dpm_setup(cortex_a8
, didr
);
1958 if (retval
!= ERROR_OK
)
1961 /* Setup Breakpoint Register Pairs */
1962 cortex_a8
->brp_num
= ((didr
>> 24) & 0x0F) + 1;
1963 cortex_a8
->brp_num_context
= ((didr
>> 20) & 0x0F) + 1;
1964 cortex_a8
->brp_num_available
= cortex_a8
->brp_num
;
1965 cortex_a8
->brp_list
= calloc(cortex_a8
->brp_num
, sizeof(struct cortex_a8_brp
));
1966 // cortex_a8->brb_enabled = ????;
1967 for (i
= 0; i
< cortex_a8
->brp_num
; i
++)
1969 cortex_a8
->brp_list
[i
].used
= 0;
1970 if (i
< (cortex_a8
->brp_num
-cortex_a8
->brp_num_context
))
1971 cortex_a8
->brp_list
[i
].type
= BRP_NORMAL
;
1973 cortex_a8
->brp_list
[i
].type
= BRP_CONTEXT
;
1974 cortex_a8
->brp_list
[i
].value
= 0;
1975 cortex_a8
->brp_list
[i
].control
= 0;
1976 cortex_a8
->brp_list
[i
].BRPn
= i
;
1979 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8
->brp_num
);
1981 target_set_examined(target
);
1985 static int cortex_a8_examine(struct target
*target
)
1987 int retval
= ERROR_OK
;
1989 /* don't re-probe hardware after each reset */
1990 if (!target_was_examined(target
))
1991 retval
= cortex_a8_examine_first(target
);
1993 /* Configure core debug access */
1994 if (retval
== ERROR_OK
)
1995 retval
= cortex_a8_init_debug_access(target
);
2001 * Cortex-A8 target creation and initialization
2004 static int cortex_a8_init_target(struct command_context
*cmd_ctx
,
2005 struct target
*target
)
2007 /* examine_first() does a bunch of this */
2011 static int cortex_a8_init_arch_info(struct target
*target
,
2012 struct cortex_a8_common
*cortex_a8
, struct jtag_tap
*tap
)
2014 struct armv7a_common
*armv7a
= &cortex_a8
->armv7a_common
;
2015 struct arm
*armv4_5
= &armv7a
->armv4_5_common
;
2016 struct adiv5_dap
*dap
= &armv7a
->dap
;
2018 armv7a
->armv4_5_common
.dap
= dap
;
2020 /* Setup struct cortex_a8_common */
2021 cortex_a8
->common_magic
= CORTEX_A8_COMMON_MAGIC
;
2022 /* tap has no dap initialized */
2025 armv7a
->armv4_5_common
.dap
= dap
;
2026 /* Setup struct cortex_a8_common */
2027 armv4_5
->arch_info
= armv7a
;
2029 /* prepare JTAG information for the new target */
2030 cortex_a8
->jtag_info
.tap
= tap
;
2031 cortex_a8
->jtag_info
.scann_size
= 4;
2033 /* Leave (only) generic DAP stuff for debugport_init() */
2034 dap
->jtag_info
= &cortex_a8
->jtag_info
;
2035 dap
->memaccess_tck
= 80;
2037 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
2038 dap
->tar_autoincr_block
= (1 << 10);
2039 dap
->memaccess_tck
= 80;
2043 armv7a
->armv4_5_common
.dap
= tap
->dap
;
2045 cortex_a8
->fast_reg_read
= 0;
2047 /* Set default value */
2048 cortex_a8
->current_address_mode
= ARM_MODE_ANY
;
2050 /* register arch-specific functions */
2051 armv7a
->examine_debug_reason
= NULL
;
2053 armv7a
->post_debug_entry
= cortex_a8_post_debug_entry
;
2055 armv7a
->pre_restore_context
= NULL
;
2056 armv7a
->armv4_5_mmu
.armv4_5_cache
.ctype
= -1;
2057 armv7a
->armv4_5_mmu
.get_ttb
= cortex_a8_get_ttb
;
2058 armv7a
->armv4_5_mmu
.read_memory
= cortex_a8_read_phys_memory
;
2059 armv7a
->armv4_5_mmu
.write_memory
= cortex_a8_write_phys_memory
;
2060 armv7a
->armv4_5_mmu
.disable_mmu_caches
= cortex_a8_disable_mmu_caches
;
2061 armv7a
->armv4_5_mmu
.enable_mmu_caches
= cortex_a8_enable_mmu_caches
;
2062 armv7a
->armv4_5_mmu
.has_tiny_pages
= 1;
2063 armv7a
->armv4_5_mmu
.mmu_enabled
= 0;
2066 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
2068 /* REVISIT v7a setup should be in a v7a-specific routine */
2069 arm_init_arch_info(target
, armv4_5
);
2070 armv7a
->common_magic
= ARMV7_COMMON_MAGIC
;
2072 target_register_timer_callback(cortex_a8_handle_target_request
, 1, 1, target
);
2077 static int cortex_a8_target_create(struct target
*target
, Jim_Interp
*interp
)
2079 struct cortex_a8_common
*cortex_a8
= calloc(1, sizeof(struct cortex_a8_common
));
2081 return cortex_a8_init_arch_info(target
, cortex_a8
, target
->tap
);
2084 static int cortex_a8_get_ttb(struct target
*target
, uint32_t *result
)
2086 struct cortex_a8_common
*cortex_a8
= target_to_cortex_a8(target
);
2087 struct armv7a_common
*armv7a
= &cortex_a8
->armv7a_common
;
2088 uint32_t ttb
= 0, retval
= ERROR_OK
;
2090 /* current_address_mode is set inside cortex_a8_virt2phys()
2091 where we can determine if address belongs to user or kernel */
2092 if(cortex_a8
->current_address_mode
== ARM_MODE_SVC
)
2094 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2095 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2096 0, 1, /* op1, op2 */
2097 2, 0, /* CRn, CRm */
2099 if (retval
!= ERROR_OK
)
2102 else if(cortex_a8
->current_address_mode
== ARM_MODE_USR
)
2104 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2105 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2106 0, 0, /* op1, op2 */
2107 2, 0, /* CRn, CRm */
2109 if (retval
!= ERROR_OK
)
2112 /* we don't know whose address is: user or kernel
2113 we assume that if we are in kernel mode then
2114 address belongs to kernel else if in user mode
2116 else if(armv7a
->armv4_5_common
.core_mode
== ARM_MODE_SVC
)
2118 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2119 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2120 0, 1, /* op1, op2 */
2121 2, 0, /* CRn, CRm */
2123 if (retval
!= ERROR_OK
)
2126 else if(armv7a
->armv4_5_common
.core_mode
== ARM_MODE_USR
)
2128 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2129 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2130 0, 0, /* op1, op2 */
2131 2, 0, /* CRn, CRm */
2133 if (retval
!= ERROR_OK
)
2136 /* finally we don't know whose ttb to use: user or kernel */
2138 LOG_ERROR("Don't know how to get ttb for current mode!!!");
2147 static int cortex_a8_disable_mmu_caches(struct target
*target
, int mmu
,
2148 int d_u_cache
, int i_cache
)
2150 struct cortex_a8_common
*cortex_a8
= target_to_cortex_a8(target
);
2151 struct armv7a_common
*armv7a
= &cortex_a8
->armv7a_common
;
2152 uint32_t cp15_control
;
2155 /* read cp15 control register */
2156 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2157 0, 0, /* op1, op2 */
2158 1, 0, /* CRn, CRm */
2160 if (retval
!= ERROR_OK
)
2165 cp15_control
&= ~0x1U
;
2168 cp15_control
&= ~0x4U
;
2171 cp15_control
&= ~0x1000U
;
2173 retval
= armv7a
->armv4_5_common
.mcr(target
, 15,
2174 0, 0, /* op1, op2 */
2175 1, 0, /* CRn, CRm */
2180 static int cortex_a8_enable_mmu_caches(struct target
*target
, int mmu
,
2181 int d_u_cache
, int i_cache
)
2183 struct cortex_a8_common
*cortex_a8
= target_to_cortex_a8(target
);
2184 struct armv7a_common
*armv7a
= &cortex_a8
->armv7a_common
;
2185 uint32_t cp15_control
;
2188 /* read cp15 control register */
2189 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2190 0, 0, /* op1, op2 */
2191 1, 0, /* CRn, CRm */
2193 if (retval
!= ERROR_OK
)
2197 cp15_control
|= 0x1U
;
2200 cp15_control
|= 0x4U
;
2203 cp15_control
|= 0x1000U
;
2205 retval
= armv7a
->armv4_5_common
.mcr(target
, 15,
2206 0, 0, /* op1, op2 */
2207 1, 0, /* CRn, CRm */
2213 static int cortex_a8_mmu(struct target
*target
, int *enabled
)
2215 if (target
->state
!= TARGET_HALTED
) {
2216 LOG_ERROR("%s: target not halted", __func__
);
2217 return ERROR_TARGET_INVALID
;
2220 *enabled
= target_to_cortex_a8(target
)->armv7a_common
.armv4_5_mmu
.mmu_enabled
;
2224 static int cortex_a8_virt2phys(struct target
*target
,
2225 uint32_t virt
, uint32_t *phys
)
2228 struct cortex_a8_common
*cortex_a8
= target_to_cortex_a8(target
);
2229 // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2230 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
2232 /* We assume that virtual address is separated
2233 between user and kernel in Linux style:
2234 0x00000000-0xbfffffff - User space
2235 0xc0000000-0xffffffff - Kernel space */
2236 if( virt
< 0xc0000000 ) /* Linux user space */
2237 cortex_a8
->current_address_mode
= ARM_MODE_USR
;
2238 else /* Linux kernel */
2239 cortex_a8
->current_address_mode
= ARM_MODE_SVC
;
2241 int retval
= armv4_5_mmu_translate_va(target
,
2242 &armv7a
->armv4_5_mmu
, virt
, &cb
, &ret
);
2243 if (retval
!= ERROR_OK
)
2245 /* Reset the flag. We don't want someone else to use it by error */
2246 cortex_a8
->current_address_mode
= ARM_MODE_ANY
;
2252 COMMAND_HANDLER(cortex_a8_handle_cache_info_command
)
2254 struct target
*target
= get_current_target(CMD_CTX
);
2255 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
2257 return armv4_5_handle_cache_info_command(CMD_CTX
,
2258 &armv7a
->armv4_5_mmu
.armv4_5_cache
);
2262 COMMAND_HANDLER(cortex_a8_handle_dbginit_command
)
2264 struct target
*target
= get_current_target(CMD_CTX
);
2265 if (!target_was_examined(target
))
2267 LOG_ERROR("target not examined yet");
2271 return cortex_a8_init_debug_access(target
);
2274 static const struct command_registration cortex_a8_exec_command_handlers
[] = {
2276 .name
= "cache_info",
2277 .handler
= cortex_a8_handle_cache_info_command
,
2278 .mode
= COMMAND_EXEC
,
2279 .help
= "display information about target caches",
2283 .handler
= cortex_a8_handle_dbginit_command
,
2284 .mode
= COMMAND_EXEC
,
2285 .help
= "Initialize core debug",
2287 COMMAND_REGISTRATION_DONE
2289 static const struct command_registration cortex_a8_command_handlers
[] = {
2291 .chain
= arm_command_handlers
,
2294 .chain
= armv7a_command_handlers
,
2297 .name
= "cortex_a8",
2298 .mode
= COMMAND_ANY
,
2299 .help
= "Cortex-A8 command group",
2300 .chain
= cortex_a8_exec_command_handlers
,
2302 COMMAND_REGISTRATION_DONE
2305 struct target_type cortexa8_target
= {
2306 .name
= "cortex_a8",
2308 .poll
= cortex_a8_poll
,
2309 .arch_state
= armv7a_arch_state
,
2311 .target_request_data
= NULL
,
2313 .halt
= cortex_a8_halt
,
2314 .resume
= cortex_a8_resume
,
2315 .step
= cortex_a8_step
,
2317 .assert_reset
= cortex_a8_assert_reset
,
2318 .deassert_reset
= cortex_a8_deassert_reset
,
2319 .soft_reset_halt
= NULL
,
2321 /* REVISIT allow exporting VFP3 registers ... */
2322 .get_gdb_reg_list
= arm_get_gdb_reg_list
,
2324 .read_memory
= cortex_a8_read_memory
,
2325 .write_memory
= cortex_a8_write_memory
,
2326 .bulk_write_memory
= cortex_a8_bulk_write_memory
,
2328 .checksum_memory
= arm_checksum_memory
,
2329 .blank_check_memory
= arm_blank_check_memory
,
2331 .run_algorithm
= armv4_5_run_algorithm
,
2333 .add_breakpoint
= cortex_a8_add_breakpoint
,
2334 .remove_breakpoint
= cortex_a8_remove_breakpoint
,
2335 .add_watchpoint
= NULL
,
2336 .remove_watchpoint
= NULL
,
2338 .commands
= cortex_a8_command_handlers
,
2339 .target_create
= cortex_a8_target_create
,
2340 .init_target
= cortex_a8_init_target
,
2341 .examine
= cortex_a8_examine
,
2343 .read_phys_memory
= cortex_a8_read_phys_memory
,
2344 .write_phys_memory
= cortex_a8_write_phys_memory
,
2345 .mmu
= cortex_a8_mmu
,
2346 .virt2phys
= cortex_a8_virt2phys
,
Linking to existing account procedure
If you already have an account and want to add another login method
you
MUST first sign in with your existing account and
then change URL to read
https://review.openocd.org/login/?link
to get to this page again but this time it'll work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)