1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
5 * Copyright (C) 2006 by Magnus Lundin *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
17 * This program is free software; you can redistribute it and/or modify *
18 * it under the terms of the GNU General Public License as published by *
19 * the Free Software Foundation; either version 2 of the License, or *
20 * (at your option) any later version. *
22 * This program is distributed in the hope that it will be useful, *
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
25 * GNU General Public License for more details. *
27 * You should have received a copy of the GNU General Public License *
28 * along with this program; if not, write to the *
29 * Free Software Foundation, Inc., *
30 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
32 * Cortex-A9(tm) TRM, ARM DDI 0407F *
34 ***************************************************************************/
39 #include "breakpoints.h"
40 #include "cortex_a9.h"
42 #include "target_request.h"
43 #include "target_type.h"
44 #include "arm_opcodes.h"
45 #include <helper/time_support.h>
/* Forward declarations for routines referenced before their definitions. */
static int cortex_a9_poll(struct target *target);
static int cortex_a9_debug_entry(struct target *target);
static int cortex_a9_restore_context(struct target *target, bool bpwp);
static int cortex_a9_set_breakpoint(struct target *target,
		struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a9_unset_breakpoint(struct target *target,
		struct breakpoint *breakpoint);
static int cortex_a9_dap_read_coreregister_u32(struct target *target,
		uint32_t *value, int regnum);
static int cortex_a9_dap_write_coreregister_u32(struct target *target,
		uint32_t value, int regnum);
static int cortex_a9_mmu(struct target *target, int *enabled);
static int cortex_a9_virt2phys(struct target *target,
		uint32_t virt, uint32_t *phys);
static int cortex_a9_disable_mmu_caches(struct target *target, int mmu,
		int d_u_cache, int i_cache);
static int cortex_a9_enable_mmu_caches(struct target *target, int mmu,
		int d_u_cache, int i_cache);
static int cortex_a9_get_ttb(struct target *target, uint32_t *result);
/*
 * FIXME do topology discovery using the ROM; don't
 * assume this is an OMAP3.  Also, allow for multiple ARMv7-A
 * cores, with different AP numbering ... don't use a #define
 * for these numbers, use per-core armv7a state.
 */
/* AP selector values used for memory vs. debug register accesses. */
#define swjdp_memoryap 0
#define swjdp_debugap 1
78 * Cortex-A9 Basic debug access, very low level assumes state is saved
80 static int cortex_a9_init_debug_access(struct target
*target
)
82 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
83 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
84 uint8_t saved_apsel
= dap_ap_get_select(swjdp
);
89 dap_ap_select(swjdp
, swjdp_debugap
);
93 /* Unlocking the debug registers for modification */
94 /* The debugport might be uninitialised so try twice */
95 retval
= mem_ap_write_atomic_u32(swjdp
, armv7a
->debug_base
+ CPUDBG_LOCKACCESS
, 0xC5ACCE55);
96 if (retval
!= ERROR_OK
)
99 retval
= mem_ap_write_atomic_u32(swjdp
, armv7a
->debug_base
+ CPUDBG_LOCKACCESS
, 0xC5ACCE55);
100 if (retval
== ERROR_OK
)
102 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
105 if (retval
!= ERROR_OK
)
107 /* Clear Sticky Power Down status Bit in PRSR to enable access to
108 the registers in the Core Power Domain */
109 retval
= mem_ap_read_atomic_u32(swjdp
, armv7a
->debug_base
+ CPUDBG_PRSR
, &dummy
);
110 if (retval
!= ERROR_OK
)
113 /* Enabling of instruction execution in debug mode is done in debug_entry code */
115 /* Resync breakpoint registers */
117 /* Since this is likely called from init or reset, update target state information*/
118 retval
= cortex_a9_poll(target
);
121 dap_ap_select(swjdp
, saved_apsel
);
125 /* To reduce needless round-trips, pass in a pointer to the current
126 * DSCR value. Initialize it to zero if you just need to know the
127 * value on return from this function; or DSCR_INSTR_COMP if you
128 * happen to know that no instruction is pending.
130 static int cortex_a9_exec_opcode(struct target
*target
,
131 uint32_t opcode
, uint32_t *dscr_p
)
135 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
136 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
138 dscr
= dscr_p
? *dscr_p
: 0;
140 LOG_DEBUG("exec opcode 0x%08" PRIx32
, opcode
);
142 /* Wait for InstrCompl bit to be set */
143 long long then
= timeval_ms();
144 while ((dscr
& DSCR_INSTR_COMP
) == 0)
146 retval
= mem_ap_read_atomic_u32(swjdp
,
147 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
148 if (retval
!= ERROR_OK
)
150 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32
, opcode
);
153 if (timeval_ms() > then
+ 1000)
155 LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
160 retval
= mem_ap_write_u32(swjdp
, armv7a
->debug_base
+ CPUDBG_ITR
, opcode
);
161 if (retval
!= ERROR_OK
)
167 retval
= mem_ap_read_atomic_u32(swjdp
,
168 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
169 if (retval
!= ERROR_OK
)
171 LOG_ERROR("Could not read DSCR register");
174 if (timeval_ms() > then
+ 1000)
176 LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
180 while ((dscr
& DSCR_INSTR_COMP
) == 0); /* Wait for InstrCompl bit to be set */
188 /**************************************************************************
189 Read core register with very few exec_opcode, fast but needs work_area.
190 This can cause problems with MMU active.
191 **************************************************************************/
192 static int cortex_a9_read_regs_through_mem(struct target
*target
, uint32_t address
,
195 int retval
= ERROR_OK
;
196 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
197 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
199 retval
= cortex_a9_dap_read_coreregister_u32(target
, regfile
, 0);
200 if (retval
!= ERROR_OK
)
202 retval
= cortex_a9_dap_write_coreregister_u32(target
, address
, 0);
203 if (retval
!= ERROR_OK
)
205 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL
);
206 if (retval
!= ERROR_OK
)
209 dap_ap_select(swjdp
, swjdp_memoryap
);
210 retval
= mem_ap_read_buf_u32(swjdp
, (uint8_t *)(®file
[1]), 4*15, address
);
211 if (retval
!= ERROR_OK
)
213 dap_ap_select(swjdp
, swjdp_debugap
);
218 static int cortex_a9_dap_read_coreregister_u32(struct target
*target
,
219 uint32_t *value
, int regnum
)
221 int retval
= ERROR_OK
;
222 uint8_t reg
= regnum
&0xFF;
224 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
225 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
232 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
233 retval
= cortex_a9_exec_opcode(target
,
234 ARMV4_5_MCR(14, 0, reg
, 0, 5, 0),
236 if (retval
!= ERROR_OK
)
241 /* "MOV r0, r15"; then move r0 to DCCTX */
242 retval
= cortex_a9_exec_opcode(target
, 0xE1A0000F, &dscr
);
243 if (retval
!= ERROR_OK
)
245 retval
= cortex_a9_exec_opcode(target
,
246 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
248 if (retval
!= ERROR_OK
)
253 /* "MRS r0, CPSR" or "MRS r0, SPSR"
254 * then move r0 to DCCTX
256 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_MRS(0, reg
& 1), &dscr
);
257 if (retval
!= ERROR_OK
)
259 retval
= cortex_a9_exec_opcode(target
,
260 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
262 if (retval
!= ERROR_OK
)
266 /* Wait for DTRRXfull then read DTRRTX */
267 long long then
= timeval_ms();
268 while ((dscr
& DSCR_DTR_TX_FULL
) == 0)
270 retval
= mem_ap_read_atomic_u32(swjdp
,
271 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
272 if (retval
!= ERROR_OK
)
274 if (timeval_ms() > then
+ 1000)
276 LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
281 retval
= mem_ap_read_atomic_u32(swjdp
,
282 armv7a
->debug_base
+ CPUDBG_DTRTX
, value
);
283 LOG_DEBUG("read DCC 0x%08" PRIx32
, *value
);
288 static int cortex_a9_dap_write_coreregister_u32(struct target
*target
,
289 uint32_t value
, int regnum
)
291 int retval
= ERROR_OK
;
292 uint8_t Rd
= regnum
&0xFF;
294 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
295 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
297 LOG_DEBUG("register %i, value 0x%08" PRIx32
, regnum
, value
);
299 /* Check that DCCRX is not full */
300 retval
= mem_ap_read_atomic_u32(swjdp
,
301 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
302 if (retval
!= ERROR_OK
)
304 if (dscr
& DSCR_DTR_RX_FULL
)
306 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32
, dscr
);
307 /* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
308 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
310 if (retval
!= ERROR_OK
)
317 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
318 LOG_DEBUG("write DCC 0x%08" PRIx32
, value
);
319 retval
= mem_ap_write_u32(swjdp
,
320 armv7a
->debug_base
+ CPUDBG_DTRRX
, value
);
321 if (retval
!= ERROR_OK
)
326 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
327 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_MRC(14, 0, Rd
, 0, 5, 0),
329 if (retval
!= ERROR_OK
)
334 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
337 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
339 if (retval
!= ERROR_OK
)
341 retval
= cortex_a9_exec_opcode(target
, 0xE1A0F000, &dscr
);
342 if (retval
!= ERROR_OK
)
347 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
348 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
350 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
352 if (retval
!= ERROR_OK
)
354 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_MSR_GP(0, 0xF, Rd
& 1),
356 if (retval
!= ERROR_OK
)
359 /* "Prefetch flush" after modifying execution status in CPSR */
362 retval
= cortex_a9_exec_opcode(target
,
363 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
365 if (retval
!= ERROR_OK
)
373 /* Write to memory mapped registers directly with no cache or mmu handling */
374 static int cortex_a9_dap_write_memap_register_u32(struct target
*target
, uint32_t address
, uint32_t value
)
377 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
378 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
380 retval
= mem_ap_write_atomic_u32(swjdp
, address
, value
);
386 * Cortex-A9 implementation of Debug Programmer's Model
388 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
389 * so there's no need to poll for it before executing an instruction.
391 * NOTE that in several of these cases the "stall" mode might be useful.
392 * It'd let us queue a few operations together... prepare/finish might
393 * be the places to enable/disable that mode.
396 static inline struct cortex_a9_common
*dpm_to_a9(struct arm_dpm
*dpm
)
398 return container_of(dpm
, struct cortex_a9_common
, armv7a_common
.dpm
);
401 static int cortex_a9_write_dcc(struct cortex_a9_common
*a9
, uint32_t data
)
403 LOG_DEBUG("write DCC 0x%08" PRIx32
, data
);
404 return mem_ap_write_u32(&a9
->armv7a_common
.dap
,
405 a9
->armv7a_common
.debug_base
+ CPUDBG_DTRRX
, data
);
408 static int cortex_a9_read_dcc(struct cortex_a9_common
*a9
, uint32_t *data
,
411 struct adiv5_dap
*swjdp
= &a9
->armv7a_common
.dap
;
412 uint32_t dscr
= DSCR_INSTR_COMP
;
418 /* Wait for DTRRXfull */
419 long long then
= timeval_ms();
420 while ((dscr
& DSCR_DTR_TX_FULL
) == 0) {
421 retval
= mem_ap_read_atomic_u32(swjdp
,
422 a9
->armv7a_common
.debug_base
+ CPUDBG_DSCR
,
424 if (retval
!= ERROR_OK
)
426 if (timeval_ms() > then
+ 1000)
428 LOG_ERROR("Timeout waiting for read dcc");
433 retval
= mem_ap_read_atomic_u32(swjdp
,
434 a9
->armv7a_common
.debug_base
+ CPUDBG_DTRTX
, data
);
435 if (retval
!= ERROR_OK
)
437 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
445 static int cortex_a9_dpm_prepare(struct arm_dpm
*dpm
)
447 struct cortex_a9_common
*a9
= dpm_to_a9(dpm
);
448 struct adiv5_dap
*swjdp
= &a9
->armv7a_common
.dap
;
452 /* set up invariant: INSTR_COMP is set after ever DPM operation */
453 long long then
= timeval_ms();
456 retval
= mem_ap_read_atomic_u32(swjdp
,
457 a9
->armv7a_common
.debug_base
+ CPUDBG_DSCR
,
459 if (retval
!= ERROR_OK
)
461 if ((dscr
& DSCR_INSTR_COMP
) != 0)
463 if (timeval_ms() > then
+ 1000)
465 LOG_ERROR("Timeout waiting for dpm prepare");
470 /* this "should never happen" ... */
471 if (dscr
& DSCR_DTR_RX_FULL
) {
472 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32
, dscr
);
474 retval
= cortex_a9_exec_opcode(
475 a9
->armv7a_common
.armv4_5_common
.target
,
476 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
478 if (retval
!= ERROR_OK
)
485 static int cortex_a9_dpm_finish(struct arm_dpm
*dpm
)
487 /* REVISIT what could be done here? */
491 static int cortex_a9_instr_write_data_dcc(struct arm_dpm
*dpm
,
492 uint32_t opcode
, uint32_t data
)
494 struct cortex_a9_common
*a9
= dpm_to_a9(dpm
);
496 uint32_t dscr
= DSCR_INSTR_COMP
;
498 retval
= cortex_a9_write_dcc(a9
, data
);
499 if (retval
!= ERROR_OK
)
502 return cortex_a9_exec_opcode(
503 a9
->armv7a_common
.armv4_5_common
.target
,
508 static int cortex_a9_instr_write_data_r0(struct arm_dpm
*dpm
,
509 uint32_t opcode
, uint32_t data
)
511 struct cortex_a9_common
*a9
= dpm_to_a9(dpm
);
512 uint32_t dscr
= DSCR_INSTR_COMP
;
515 retval
= cortex_a9_write_dcc(a9
, data
);
516 if (retval
!= ERROR_OK
)
519 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
520 retval
= cortex_a9_exec_opcode(
521 a9
->armv7a_common
.armv4_5_common
.target
,
522 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
524 if (retval
!= ERROR_OK
)
527 /* then the opcode, taking data from R0 */
528 retval
= cortex_a9_exec_opcode(
529 a9
->armv7a_common
.armv4_5_common
.target
,
536 static int cortex_a9_instr_cpsr_sync(struct arm_dpm
*dpm
)
538 struct target
*target
= dpm
->arm
->target
;
539 uint32_t dscr
= DSCR_INSTR_COMP
;
541 /* "Prefetch flush" after modifying execution status in CPSR */
542 return cortex_a9_exec_opcode(target
,
543 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
547 static int cortex_a9_instr_read_data_dcc(struct arm_dpm
*dpm
,
548 uint32_t opcode
, uint32_t *data
)
550 struct cortex_a9_common
*a9
= dpm_to_a9(dpm
);
552 uint32_t dscr
= DSCR_INSTR_COMP
;
554 /* the opcode, writing data to DCC */
555 retval
= cortex_a9_exec_opcode(
556 a9
->armv7a_common
.armv4_5_common
.target
,
559 if (retval
!= ERROR_OK
)
562 return cortex_a9_read_dcc(a9
, data
, &dscr
);
566 static int cortex_a9_instr_read_data_r0(struct arm_dpm
*dpm
,
567 uint32_t opcode
, uint32_t *data
)
569 struct cortex_a9_common
*a9
= dpm_to_a9(dpm
);
570 uint32_t dscr
= DSCR_INSTR_COMP
;
573 /* the opcode, writing data to R0 */
574 retval
= cortex_a9_exec_opcode(
575 a9
->armv7a_common
.armv4_5_common
.target
,
578 if (retval
!= ERROR_OK
)
581 /* write R0 to DCC */
582 retval
= cortex_a9_exec_opcode(
583 a9
->armv7a_common
.armv4_5_common
.target
,
584 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
586 if (retval
!= ERROR_OK
)
589 return cortex_a9_read_dcc(a9
, data
, &dscr
);
592 static int cortex_a9_bpwp_enable(struct arm_dpm
*dpm
, unsigned index_t
,
593 uint32_t addr
, uint32_t control
)
595 struct cortex_a9_common
*a9
= dpm_to_a9(dpm
);
596 uint32_t vr
= a9
->armv7a_common
.debug_base
;
597 uint32_t cr
= a9
->armv7a_common
.debug_base
;
601 case 0 ... 15: /* breakpoints */
602 vr
+= CPUDBG_BVR_BASE
;
603 cr
+= CPUDBG_BCR_BASE
;
605 case 16 ... 31: /* watchpoints */
606 vr
+= CPUDBG_WVR_BASE
;
607 cr
+= CPUDBG_WCR_BASE
;
616 LOG_DEBUG("A9: bpwp enable, vr %08x cr %08x",
617 (unsigned) vr
, (unsigned) cr
);
619 retval
= cortex_a9_dap_write_memap_register_u32(dpm
->arm
->target
,
621 if (retval
!= ERROR_OK
)
623 retval
= cortex_a9_dap_write_memap_register_u32(dpm
->arm
->target
,
628 static int cortex_a9_bpwp_disable(struct arm_dpm
*dpm
, unsigned index_t
)
630 struct cortex_a9_common
*a9
= dpm_to_a9(dpm
);
635 cr
= a9
->armv7a_common
.debug_base
+ CPUDBG_BCR_BASE
;
638 cr
= a9
->armv7a_common
.debug_base
+ CPUDBG_WCR_BASE
;
646 LOG_DEBUG("A9: bpwp disable, cr %08x", (unsigned) cr
);
648 /* clear control register */
649 return cortex_a9_dap_write_memap_register_u32(dpm
->arm
->target
, cr
, 0);
652 static int cortex_a9_dpm_setup(struct cortex_a9_common
*a9
, uint32_t didr
)
654 struct arm_dpm
*dpm
= &a9
->armv7a_common
.dpm
;
657 dpm
->arm
= &a9
->armv7a_common
.armv4_5_common
;
660 dpm
->prepare
= cortex_a9_dpm_prepare
;
661 dpm
->finish
= cortex_a9_dpm_finish
;
663 dpm
->instr_write_data_dcc
= cortex_a9_instr_write_data_dcc
;
664 dpm
->instr_write_data_r0
= cortex_a9_instr_write_data_r0
;
665 dpm
->instr_cpsr_sync
= cortex_a9_instr_cpsr_sync
;
667 dpm
->instr_read_data_dcc
= cortex_a9_instr_read_data_dcc
;
668 dpm
->instr_read_data_r0
= cortex_a9_instr_read_data_r0
;
670 dpm
->bpwp_enable
= cortex_a9_bpwp_enable
;
671 dpm
->bpwp_disable
= cortex_a9_bpwp_disable
;
673 retval
= arm_dpm_setup(dpm
);
674 if (retval
== ERROR_OK
)
675 retval
= arm_dpm_initialize(dpm
);
682 * Cortex-A9 Run control
685 static int cortex_a9_poll(struct target
*target
)
687 int retval
= ERROR_OK
;
689 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
690 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
691 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
692 enum target_state prev_target_state
= target
->state
;
693 uint8_t saved_apsel
= dap_ap_get_select(swjdp
);
695 dap_ap_select(swjdp
, swjdp_debugap
);
696 retval
= mem_ap_read_atomic_u32(swjdp
,
697 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
698 if (retval
!= ERROR_OK
)
700 dap_ap_select(swjdp
, saved_apsel
);
703 cortex_a9
->cpudbg_dscr
= dscr
;
705 if (DSCR_RUN_MODE(dscr
) == (DSCR_CORE_HALTED
| DSCR_CORE_RESTARTED
))
707 if (prev_target_state
!= TARGET_HALTED
)
709 /* We have a halting debug event */
710 LOG_DEBUG("Target halted");
711 target
->state
= TARGET_HALTED
;
712 if ((prev_target_state
== TARGET_RUNNING
)
713 || (prev_target_state
== TARGET_RESET
))
715 retval
= cortex_a9_debug_entry(target
);
716 if (retval
!= ERROR_OK
)
719 target_call_event_callbacks(target
,
720 TARGET_EVENT_HALTED
);
722 if (prev_target_state
== TARGET_DEBUG_RUNNING
)
726 retval
= cortex_a9_debug_entry(target
);
727 if (retval
!= ERROR_OK
)
730 target_call_event_callbacks(target
,
731 TARGET_EVENT_DEBUG_HALTED
);
735 else if (DSCR_RUN_MODE(dscr
) == DSCR_CORE_RESTARTED
)
737 target
->state
= TARGET_RUNNING
;
741 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32
, dscr
);
742 target
->state
= TARGET_UNKNOWN
;
745 dap_ap_select(swjdp
, saved_apsel
);
750 static int cortex_a9_halt(struct target
*target
)
752 int retval
= ERROR_OK
;
754 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
755 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
756 uint8_t saved_apsel
= dap_ap_get_select(swjdp
);
757 dap_ap_select(swjdp
, swjdp_debugap
);
760 * Tell the core to be halted by writing DRCR with 0x1
761 * and then wait for the core to be halted.
763 retval
= mem_ap_write_atomic_u32(swjdp
,
764 armv7a
->debug_base
+ CPUDBG_DRCR
, DRCR_HALT
);
765 if (retval
!= ERROR_OK
)
769 * enter halting debug mode
771 retval
= mem_ap_read_atomic_u32(swjdp
, armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
772 if (retval
!= ERROR_OK
)
775 retval
= mem_ap_write_atomic_u32(swjdp
,
776 armv7a
->debug_base
+ CPUDBG_DSCR
, dscr
| DSCR_HALT_DBG_MODE
);
777 if (retval
!= ERROR_OK
)
780 long long then
= timeval_ms();
783 retval
= mem_ap_read_atomic_u32(swjdp
,
784 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
785 if (retval
!= ERROR_OK
)
787 if ((dscr
& DSCR_CORE_HALTED
) != 0)
791 if (timeval_ms() > then
+ 1000)
793 LOG_ERROR("Timeout waiting for halt");
798 target
->debug_reason
= DBG_REASON_DBGRQ
;
801 dap_ap_select(swjdp
, saved_apsel
);
805 static int cortex_a9_resume(struct target
*target
, int current
,
806 uint32_t address
, int handle_breakpoints
, int debug_execution
)
808 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
809 struct arm
*armv4_5
= &armv7a
->armv4_5_common
;
810 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
813 // struct breakpoint *breakpoint = NULL;
814 uint32_t resume_pc
, dscr
;
816 uint8_t saved_apsel
= dap_ap_get_select(swjdp
);
817 dap_ap_select(swjdp
, swjdp_debugap
);
819 if (!debug_execution
)
820 target_free_all_working_areas(target
);
825 /* Disable interrupts */
826 /* We disable interrupts in the PRIMASK register instead of
827 * masking with C_MASKINTS,
828 * This is probably the same issue as Cortex-M3 Errata 377493:
829 * C_MASKINTS in parallel with disabled interrupts can cause
830 * local faults to not be taken. */
831 buf_set_u32(armv7m
->core_cache
->reg_list
[ARMV7M_PRIMASK
].value
, 0, 32, 1);
832 armv7m
->core_cache
->reg_list
[ARMV7M_PRIMASK
].dirty
= 1;
833 armv7m
->core_cache
->reg_list
[ARMV7M_PRIMASK
].valid
= 1;
835 /* Make sure we are in Thumb mode */
836 buf_set_u32(armv7m
->core_cache
->reg_list
[ARMV7M_xPSR
].value
, 0, 32,
837 buf_get_u32(armv7m
->core_cache
->reg_list
[ARMV7M_xPSR
].value
, 0, 32) | (1 << 24));
838 armv7m
->core_cache
->reg_list
[ARMV7M_xPSR
].dirty
= 1;
839 armv7m
->core_cache
->reg_list
[ARMV7M_xPSR
].valid
= 1;
843 /* current = 1: continue on current pc, otherwise continue at <address> */
844 resume_pc
= buf_get_u32(armv4_5
->pc
->value
, 0, 32);
848 /* Make sure that the Armv7 gdb thumb fixups does not
849 * kill the return address
851 switch (armv4_5
->core_state
)
854 resume_pc
&= 0xFFFFFFFC;
856 case ARM_STATE_THUMB
:
857 case ARM_STATE_THUMB_EE
:
858 /* When the return address is loaded into PC
859 * bit 0 must be 1 to stay in Thumb state
863 case ARM_STATE_JAZELLE
:
864 LOG_ERROR("How do I resume into Jazelle state??");
867 LOG_DEBUG("resume pc = 0x%08" PRIx32
, resume_pc
);
868 buf_set_u32(armv4_5
->pc
->value
, 0, 32, resume_pc
);
869 armv4_5
->pc
->dirty
= 1;
870 armv4_5
->pc
->valid
= 1;
872 retval
= cortex_a9_restore_context(target
, handle_breakpoints
);
873 if (retval
!= ERROR_OK
)
877 /* the front-end may request us not to handle breakpoints */
878 if (handle_breakpoints
)
880 /* Single step past breakpoint at current address */
881 if ((breakpoint
= breakpoint_find(target
, resume_pc
)))
883 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint
->address
);
884 cortex_m3_unset_breakpoint(target
, breakpoint
);
885 cortex_m3_single_step_core(target
);
886 cortex_m3_set_breakpoint(target
, breakpoint
);
893 * Restart core and wait for it to be started. Clear ITRen and sticky
894 * exception flags: see ARMv7 ARM, C5.9.
896 * REVISIT: for single stepping, we probably want to
897 * disable IRQs by default, with optional override...
900 retval
= mem_ap_read_atomic_u32(swjdp
,
901 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
902 if (retval
!= ERROR_OK
)
905 if ((dscr
& DSCR_INSTR_COMP
) == 0)
906 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
908 retval
= mem_ap_write_atomic_u32(swjdp
,
909 armv7a
->debug_base
+ CPUDBG_DSCR
, dscr
& ~DSCR_ITR_EN
);
910 if (retval
!= ERROR_OK
)
913 retval
= mem_ap_write_atomic_u32(swjdp
, armv7a
->debug_base
+ CPUDBG_DRCR
,
914 DRCR_RESTART
| DRCR_CLEAR_EXCEPTIONS
);
915 if (retval
!= ERROR_OK
)
918 long long then
= timeval_ms();
921 retval
= mem_ap_read_atomic_u32(swjdp
,
922 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
923 if (retval
!= ERROR_OK
)
925 if ((dscr
& DSCR_CORE_RESTARTED
) != 0)
927 if (timeval_ms() > then
+ 1000)
929 LOG_ERROR("Timeout waiting for resume");
934 target
->debug_reason
= DBG_REASON_NOTHALTED
;
935 target
->state
= TARGET_RUNNING
;
937 /* registers are now invalid */
938 register_cache_invalidate(armv4_5
->core_cache
);
940 if (!debug_execution
)
942 target
->state
= TARGET_RUNNING
;
943 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
944 LOG_DEBUG("target resumed at 0x%" PRIx32
, resume_pc
);
948 target
->state
= TARGET_DEBUG_RUNNING
;
949 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
950 LOG_DEBUG("target debug resumed at 0x%" PRIx32
, resume_pc
);
953 dap_ap_select(swjdp
, saved_apsel
);
958 static int cortex_a9_debug_entry(struct target
*target
)
961 uint32_t regfile
[16], cpsr
, dscr
;
962 int retval
= ERROR_OK
;
963 struct working_area
*regfile_working_area
= NULL
;
964 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
965 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
966 struct arm
*armv4_5
= &armv7a
->armv4_5_common
;
967 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
970 LOG_DEBUG("dscr = 0x%08" PRIx32
, cortex_a9
->cpudbg_dscr
);
972 /* REVISIT surely we should not re-read DSCR !! */
973 retval
= mem_ap_read_atomic_u32(swjdp
,
974 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
975 if (retval
!= ERROR_OK
)
978 /* REVISIT see A9 TRM 12.11.4 steps 2..3 -- make sure that any
979 * imprecise data aborts get discarded by issuing a Data
980 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
983 /* Enable the ITR execution once we are in debug mode */
985 retval
= mem_ap_write_atomic_u32(swjdp
,
986 armv7a
->debug_base
+ CPUDBG_DSCR
, dscr
);
987 if (retval
!= ERROR_OK
)
990 /* Examine debug reason */
991 arm_dpm_report_dscr(&armv7a
->dpm
, cortex_a9
->cpudbg_dscr
);
993 /* save address of instruction that triggered the watchpoint? */
994 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
997 retval
= mem_ap_read_atomic_u32(swjdp
,
998 armv7a
->debug_base
+ CPUDBG_WFAR
,
1000 if (retval
!= ERROR_OK
)
1002 arm_dpm_report_wfar(&armv7a
->dpm
, wfar
);
1005 /* REVISIT fast_reg_read is never set ... */
1007 /* Examine target state and mode */
1008 if (cortex_a9
->fast_reg_read
)
1009 target_alloc_working_area(target
, 64, ®file_working_area
);
1011 /* First load register acessible through core debug port*/
1012 if (!regfile_working_area
)
1014 retval
= arm_dpm_read_current_registers(&armv7a
->dpm
);
1018 dap_ap_select(swjdp
, swjdp_memoryap
);
1019 retval
= cortex_a9_read_regs_through_mem(target
,
1020 regfile_working_area
->address
, regfile
);
1021 dap_ap_select(swjdp
, swjdp_memoryap
);
1022 target_free_working_area(target
, regfile_working_area
);
1023 if (retval
!= ERROR_OK
)
1028 /* read Current PSR */
1029 retval
= cortex_a9_dap_read_coreregister_u32(target
, &cpsr
, 16);
1030 if (retval
!= ERROR_OK
)
1032 dap_ap_select(swjdp
, swjdp_debugap
);
1033 LOG_DEBUG("cpsr: %8.8" PRIx32
, cpsr
);
1035 arm_set_cpsr(armv4_5
, cpsr
);
1038 for (i
= 0; i
<= ARM_PC
; i
++)
1040 reg
= arm_reg_current(armv4_5
, i
);
1042 buf_set_u32(reg
->value
, 0, 32, regfile
[i
]);
1047 /* Fixup PC Resume Address */
1048 if (cpsr
& (1 << 5))
1050 // T bit set for Thumb or ThumbEE state
1051 regfile
[ARM_PC
] -= 4;
1056 regfile
[ARM_PC
] -= 8;
1060 buf_set_u32(reg
->value
, 0, 32, regfile
[ARM_PC
]);
1061 reg
->dirty
= reg
->valid
;
1065 /* TODO, Move this */
1066 uint32_t cp15_control_register
, cp15_cacr
, cp15_nacr
;
1067 cortex_a9_read_cp(target
, &cp15_control_register
, 15, 0, 1, 0, 0);
1068 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register
);
1070 cortex_a9_read_cp(target
, &cp15_cacr
, 15, 0, 1, 0, 2);
1071 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr
);
1073 cortex_a9_read_cp(target
, &cp15_nacr
, 15, 0, 1, 1, 2);
1074 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr
);
1077 /* Are we in an exception handler */
1078 // armv4_5->exception_number = 0;
1079 if (armv7a
->post_debug_entry
)
1081 retval
= armv7a
->post_debug_entry(target
);
1082 if (retval
!= ERROR_OK
)
1089 static int cortex_a9_post_debug_entry(struct target
*target
)
1091 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
1092 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
1095 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1096 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
1097 0, 0, /* op1, op2 */
1098 1, 0, /* CRn, CRm */
1099 &cortex_a9
->cp15_control_reg
);
1100 if (retval
!= ERROR_OK
)
1102 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32
, cortex_a9
->cp15_control_reg
);
1104 if (armv7a
->armv4_5_mmu
.armv4_5_cache
.ctype
== -1)
1106 uint32_t cache_type_reg
;
1108 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
1109 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
1110 0, 1, /* op1, op2 */
1111 0, 0, /* CRn, CRm */
1113 if (retval
!= ERROR_OK
)
1115 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg
);
1117 /* FIXME the armv4_4 cache info DOES NOT APPLY to Cortex-A9 */
1118 armv4_5_identify_cache(cache_type_reg
,
1119 &armv7a
->armv4_5_mmu
.armv4_5_cache
);
1122 armv7a
->armv4_5_mmu
.mmu_enabled
=
1123 (cortex_a9
->cp15_control_reg
& 0x1U
) ? 1 : 0;
1124 armv7a
->armv4_5_mmu
.armv4_5_cache
.d_u_cache_enabled
=
1125 (cortex_a9
->cp15_control_reg
& 0x4U
) ? 1 : 0;
1126 armv7a
->armv4_5_mmu
.armv4_5_cache
.i_cache_enabled
=
1127 (cortex_a9
->cp15_control_reg
& 0x1000U
) ? 1 : 0;
1132 static int cortex_a9_step(struct target
*target
, int current
, uint32_t address
,
1133 int handle_breakpoints
)
1135 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1136 struct arm
*armv4_5
= &armv7a
->armv4_5_common
;
1137 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
1138 struct breakpoint
*breakpoint
= NULL
;
1139 struct breakpoint stepbreakpoint
;
1142 uint8_t saved_apsel
= dap_ap_get_select(swjdp
);
1144 if (target
->state
!= TARGET_HALTED
)
1146 LOG_WARNING("target not halted");
1147 return ERROR_TARGET_NOT_HALTED
;
1150 dap_ap_select(swjdp
, swjdp_debugap
);
1152 /* current = 1: continue on current pc, otherwise continue at <address> */
1156 buf_set_u32(r
->value
, 0, 32, address
);
1160 address
= buf_get_u32(r
->value
, 0, 32);
1163 /* The front-end may request us not to handle breakpoints.
1164 * But since Cortex-A9 uses breakpoint for single step,
1165 * we MUST handle breakpoints.
1167 handle_breakpoints
= 1;
1168 if (handle_breakpoints
) {
1169 breakpoint
= breakpoint_find(target
, address
);
1171 cortex_a9_unset_breakpoint(target
, breakpoint
);
1174 /* Setup single step breakpoint */
1175 stepbreakpoint
.address
= address
;
1176 stepbreakpoint
.length
= (armv4_5
->core_state
== ARM_STATE_THUMB
)
1178 stepbreakpoint
.type
= BKPT_HARD
;
1179 stepbreakpoint
.set
= 0;
1181 /* Break on IVA mismatch */
1182 cortex_a9_set_breakpoint(target
, &stepbreakpoint
, 0x04);
1184 target
->debug_reason
= DBG_REASON_SINGLESTEP
;
1186 retval
= cortex_a9_resume(target
, 1, address
, 0, 0);
1187 if (retval
!= ERROR_OK
)
1190 long long then
= timeval_ms();
1191 while (target
->state
!= TARGET_HALTED
)
1193 retval
= cortex_a9_poll(target
);
1194 if (retval
!= ERROR_OK
)
1196 if (timeval_ms() > then
+ 1000)
1198 LOG_ERROR("timeout waiting for target halt");
1199 retval
= ERROR_FAIL
;
1204 cortex_a9_unset_breakpoint(target
, &stepbreakpoint
);
1206 target
->debug_reason
= DBG_REASON_BREAKPOINT
;
1209 cortex_a9_set_breakpoint(target
, breakpoint
, 0);
1211 if (target
->state
!= TARGET_HALTED
)
1212 LOG_DEBUG("target stepped");
1217 dap_ap_select(swjdp
, saved_apsel
);
1221 static int cortex_a9_restore_context(struct target
*target
, bool bpwp
)
1223 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1227 if (armv7a
->pre_restore_context
)
1228 armv7a
->pre_restore_context(target
);
1230 return arm_dpm_write_dirty_registers(&armv7a
->dpm
, bpwp
);
1235 * Cortex-A9 Breakpoint and watchpoint functions
1238 /* Setup hardware Breakpoint Register Pair */
1239 static int cortex_a9_set_breakpoint(struct target
*target
,
1240 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1245 uint8_t byte_addr_select
= 0x0F;
1246 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
1247 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
1248 struct cortex_a9_brp
* brp_list
= cortex_a9
->brp_list
;
1250 if (breakpoint
->set
)
1252 LOG_WARNING("breakpoint already set");
1256 if (breakpoint
->type
== BKPT_HARD
)
1258 while (brp_list
[brp_i
].used
&& (brp_i
< cortex_a9
->brp_num
))
1260 if (brp_i
>= cortex_a9
->brp_num
)
1262 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1263 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1265 breakpoint
->set
= brp_i
+ 1;
1266 if (breakpoint
->length
== 2)
1268 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1270 control
= ((matchmode
& 0x7) << 20)
1271 | (byte_addr_select
<< 5)
1273 brp_list
[brp_i
].used
= 1;
1274 brp_list
[brp_i
].value
= (breakpoint
->address
& 0xFFFFFFFC);
1275 brp_list
[brp_i
].control
= control
;
1276 retval
= cortex_a9_dap_write_memap_register_u32(target
, armv7a
->debug_base
1277 + CPUDBG_BVR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1278 brp_list
[brp_i
].value
);
1279 if (retval
!= ERROR_OK
)
1281 retval
= cortex_a9_dap_write_memap_register_u32(target
, armv7a
->debug_base
1282 + CPUDBG_BCR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1283 brp_list
[brp_i
].control
);
1284 if (retval
!= ERROR_OK
)
1286 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%0" PRIx32
, brp_i
,
1287 brp_list
[brp_i
].control
,
1288 brp_list
[brp_i
].value
);
1290 else if (breakpoint
->type
== BKPT_SOFT
)
1293 if (breakpoint
->length
== 2)
1295 buf_set_u32(code
, 0, 32, ARMV5_T_BKPT(0x11));
1299 buf_set_u32(code
, 0, 32, ARMV5_BKPT(0x11));
1301 retval
= target
->type
->read_memory(target
,
1302 breakpoint
->address
& 0xFFFFFFFE,
1303 breakpoint
->length
, 1,
1304 breakpoint
->orig_instr
);
1305 if (retval
!= ERROR_OK
)
1307 retval
= target
->type
->write_memory(target
,
1308 breakpoint
->address
& 0xFFFFFFFE,
1309 breakpoint
->length
, 1, code
);
1310 if (retval
!= ERROR_OK
)
1312 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1318 static int cortex_a9_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1321 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
1322 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
1323 struct cortex_a9_brp
* brp_list
= cortex_a9
->brp_list
;
1325 if (!breakpoint
->set
)
1327 LOG_WARNING("breakpoint not set");
1331 if (breakpoint
->type
== BKPT_HARD
)
1333 int brp_i
= breakpoint
->set
- 1;
1334 if ((brp_i
< 0) || (brp_i
>= cortex_a9
->brp_num
))
1336 LOG_DEBUG("Invalid BRP number in breakpoint");
1339 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx32
, brp_i
,
1340 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1341 brp_list
[brp_i
].used
= 0;
1342 brp_list
[brp_i
].value
= 0;
1343 brp_list
[brp_i
].control
= 0;
1344 retval
= cortex_a9_dap_write_memap_register_u32(target
, armv7a
->debug_base
1345 + CPUDBG_BCR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1346 brp_list
[brp_i
].control
);
1347 if (retval
!= ERROR_OK
)
1349 retval
= cortex_a9_dap_write_memap_register_u32(target
, armv7a
->debug_base
1350 + CPUDBG_BVR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1351 brp_list
[brp_i
].value
);
1352 if (retval
!= ERROR_OK
)
1357 /* restore original instruction (kept in target endianness) */
1358 if (breakpoint
->length
== 4)
1360 retval
= target
->type
->write_memory(target
,
1361 breakpoint
->address
& 0xFFFFFFFE,
1362 4, 1, breakpoint
->orig_instr
);
1363 if (retval
!= ERROR_OK
)
1368 retval
= target
->type
->write_memory(target
,
1369 breakpoint
->address
& 0xFFFFFFFE,
1370 2, 1, breakpoint
->orig_instr
);
1371 if (retval
!= ERROR_OK
)
1375 breakpoint
->set
= 0;
1380 static int cortex_a9_add_breakpoint(struct target
*target
,
1381 struct breakpoint
*breakpoint
)
1383 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
1385 if ((breakpoint
->type
== BKPT_HARD
) && (cortex_a9
->brp_num_available
< 1))
1387 LOG_INFO("no hardware breakpoint available");
1388 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1391 if (breakpoint
->type
== BKPT_HARD
)
1392 cortex_a9
->brp_num_available
--;
1394 return cortex_a9_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1397 static int cortex_a9_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1399 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
1402 /* It is perfectly possible to remove breakpoints while the target is running */
1403 if (target
->state
!= TARGET_HALTED
)
1405 LOG_WARNING("target not halted");
1406 return ERROR_TARGET_NOT_HALTED
;
1410 if (breakpoint
->set
)
1412 cortex_a9_unset_breakpoint(target
, breakpoint
);
1413 if (breakpoint
->type
== BKPT_HARD
)
1414 cortex_a9
->brp_num_available
++ ;
1424 * Cortex-A9 Reset functions
1427 static int cortex_a9_assert_reset(struct target
*target
)
1429 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1433 /* FIXME when halt is requested, make it work somehow... */
1435 /* Issue some kind of warm reset. */
1436 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
)) {
1437 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1438 } else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1439 /* REVISIT handle "pulls" cases, if there's
1440 * hardware that needs them to work.
1442 jtag_add_reset(0, 1);
1444 LOG_ERROR("%s: how to reset?", target_name(target
));
1448 /* registers are now invalid */
1449 register_cache_invalidate(armv7a
->armv4_5_common
.core_cache
);
1451 target
->state
= TARGET_RESET
;
1456 static int cortex_a9_deassert_reset(struct target
*target
)
1462 /* be certain SRST is off */
1463 jtag_add_reset(0, 0);
1465 retval
= cortex_a9_poll(target
);
1466 if (retval
!= ERROR_OK
)
1469 if (target
->reset_halt
) {
1470 if (target
->state
!= TARGET_HALTED
) {
1471 LOG_WARNING("%s: ran after reset and before halt ...",
1472 target_name(target
));
1473 if ((retval
= target_halt(target
)) != ERROR_OK
)
1482 * Cortex-A9 Memory access
1484 * This is same Cortex M3 but we must also use the correct
1485 * ap number for every access.
1488 static int cortex_a9_read_phys_memory(struct target
*target
,
1489 uint32_t address
, uint32_t size
,
1490 uint32_t count
, uint8_t *buffer
)
1492 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1493 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
1494 int retval
= ERROR_INVALID_ARGUMENTS
;
1495 uint8_t apsel
= dap_ap_get_select(swjdp
);
1497 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address
, size
, count
);
1499 if (count
&& buffer
) {
1501 if ( apsel
== swjdp_memoryap
) {
1503 /* read memory through AHB-AP */
1507 retval
= mem_ap_read_buf_u32(swjdp
, buffer
, 4 * count
, address
);
1510 retval
= mem_ap_read_buf_u16(swjdp
, buffer
, 2 * count
, address
);
1513 retval
= mem_ap_read_buf_u8(swjdp
, buffer
, count
, address
);
1519 /* read memory through APB-AP */
1521 uint32_t saved_r0
, saved_r1
;
1522 int nbytes
= count
* size
;
1525 /* save registers r0 and r1, we are going to corrupt them */
1526 retval
= cortex_a9_dap_read_coreregister_u32(target
, &saved_r0
, 0);
1527 if (retval
!= ERROR_OK
)
1530 retval
= cortex_a9_dap_read_coreregister_u32(target
, &saved_r1
, 1);
1531 if (retval
!= ERROR_OK
)
1534 retval
= cortex_a9_dap_write_coreregister_u32(target
, address
, 0);
1535 if (retval
!= ERROR_OK
)
1538 while (nbytes
> 0) {
1540 /* execute instruction LDRB r1, [r0], 1 (0xe4d01001) */
1541 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_LDRB_IP(1, 0) , NULL
);
1542 if (retval
!= ERROR_OK
)
1545 retval
= cortex_a9_dap_read_coreregister_u32(target
, &data
, 1);
1546 if (retval
!= ERROR_OK
)
1554 /* restore corrupted registers r0 and r1 */
1555 retval
= cortex_a9_dap_write_coreregister_u32(target
, saved_r0
, 0);
1556 if (retval
!= ERROR_OK
)
1559 retval
= cortex_a9_dap_write_coreregister_u32(target
, saved_r1
, 1);
1560 if (retval
!= ERROR_OK
)
1569 static int cortex_a9_read_memory(struct target
*target
, uint32_t address
,
1570 uint32_t size
, uint32_t count
, uint8_t *buffer
)
1573 uint32_t virt
, phys
;
1576 /* cortex_a9 handles unaligned memory access */
1578 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address
, size
, count
);
1579 retval
= cortex_a9_mmu(target
, &enabled
);
1580 if (retval
!= ERROR_OK
)
1586 retval
= cortex_a9_virt2phys(target
, virt
, &phys
);
1587 if (retval
!= ERROR_OK
)
1590 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt
, phys
);
1594 return cortex_a9_read_phys_memory(target
, address
, size
, count
, buffer
);
1597 static int cortex_a9_write_phys_memory(struct target
*target
,
1598 uint32_t address
, uint32_t size
,
1599 uint32_t count
, uint8_t *buffer
)
1601 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1602 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
1603 int retval
= ERROR_INVALID_ARGUMENTS
;
1605 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address
, size
, count
);
1607 if (count
&& buffer
) {
1608 uint8_t apsel
= dap_ap_get_select(swjdp
);
1610 if ( apsel
== swjdp_memoryap
) {
1612 /* write memory through AHB-AP */
1615 retval
= mem_ap_write_buf_u32(swjdp
, buffer
, 4 * count
, address
);
1618 retval
= mem_ap_write_buf_u16(swjdp
, buffer
, 2 * count
, address
);
1621 retval
= mem_ap_write_buf_u8(swjdp
, buffer
, count
, address
);
1627 /* write memory through APB-AP */
1629 uint32_t saved_r0
, saved_r1
;
1630 int nbytes
= count
* size
;
1633 /* save registers r0 and r1, we are going to corrupt them */
1634 retval
= cortex_a9_dap_read_coreregister_u32(target
, &saved_r0
, 0);
1635 if (retval
!= ERROR_OK
)
1638 retval
= cortex_a9_dap_read_coreregister_u32(target
, &saved_r1
, 1);
1639 if (retval
!= ERROR_OK
)
1642 retval
= cortex_a9_dap_write_coreregister_u32(target
, address
, 0);
1643 if (retval
!= ERROR_OK
)
1646 while (nbytes
> 0) {
1650 retval
= cortex_a9_dap_write_coreregister_u32(target
, data
, 1);
1651 if (retval
!= ERROR_OK
)
1654 /* execute instruction STRB r1, [r0], 1 (0xe4c01001) */
1655 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_STRB_IP(1, 0) , NULL
);
1656 if (retval
!= ERROR_OK
)
1662 /* restore corrupted registers r0 and r1 */
1663 retval
= cortex_a9_dap_write_coreregister_u32(target
, saved_r0
, 0);
1664 if (retval
!= ERROR_OK
)
1667 retval
= cortex_a9_dap_write_coreregister_u32(target
, saved_r1
, 1);
1668 if (retval
!= ERROR_OK
)
1671 /* we can return here without invalidating D/I-cache because */
1672 /* access through APB maintains cache coherency */
1678 /* REVISIT this op is generic ARMv7-A/R stuff */
1679 if (retval
== ERROR_OK
&& target
->state
== TARGET_HALTED
)
1681 struct arm_dpm
*dpm
= armv7a
->armv4_5_common
.dpm
;
1683 retval
= dpm
->prepare(dpm
);
1684 if (retval
!= ERROR_OK
)
1687 /* The Cache handling will NOT work with MMU active, the
1688 * wrong addresses will be invalidated!
1690 * For both ICache and DCache, walk all cache lines in the
1691 * address range. Cortex-A9 has fixed 64 byte line length.
1693 * REVISIT per ARMv7, these may trigger watchpoints ...
1696 /* invalidate I-Cache */
1697 if (armv7a
->armv4_5_mmu
.armv4_5_cache
.i_cache_enabled
)
1699 /* ICIMVAU - Invalidate Cache single entry
1701 * MCR p15, 0, r0, c7, c5, 1
1703 for (uint32_t cacheline
= address
;
1704 cacheline
< address
+ size
* count
;
1706 retval
= dpm
->instr_write_data_r0(dpm
,
1707 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1709 if (retval
!= ERROR_OK
)
1714 /* invalidate D-Cache */
1715 if (armv7a
->armv4_5_mmu
.armv4_5_cache
.d_u_cache_enabled
)
1717 /* DCIMVAC - Invalidate data Cache line
1719 * MCR p15, 0, r0, c7, c6, 1
1721 for (uint32_t cacheline
= address
;
1722 cacheline
< address
+ size
* count
;
1724 retval
= dpm
->instr_write_data_r0(dpm
,
1725 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1727 if (retval
!= ERROR_OK
)
1732 /* (void) */ dpm
->finish(dpm
);
1738 static int cortex_a9_write_memory(struct target
*target
, uint32_t address
,
1739 uint32_t size
, uint32_t count
, uint8_t *buffer
)
1742 uint32_t virt
, phys
;
1745 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address
, size
, count
);
1746 retval
= cortex_a9_mmu(target
, &enabled
);
1747 if (retval
!= ERROR_OK
)
1753 retval
= cortex_a9_virt2phys(target
, virt
, &phys
);
1754 if (retval
!= ERROR_OK
)
1756 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt
, phys
);
1760 return cortex_a9_write_phys_memory(target
, address
, size
,
/**
 * Bulk write: Cortex-A9 has no special fast download path here, so simply
 * forward to the word-sized memory write.
 */
static int cortex_a9_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, uint8_t *buffer)
{
	return cortex_a9_write_memory(target, address, 4, count, buffer);
}
1770 static int cortex_a9_dcc_read(struct adiv5_dap
*swjdp
, uint8_t *value
, uint8_t *ctrl
)
1775 mem_ap_read_buf_u16(swjdp
, (uint8_t*)&dcrdr
, 1, DCB_DCRDR
);
1776 *ctrl
= (uint8_t)dcrdr
;
1777 *value
= (uint8_t)(dcrdr
>> 8);
1779 LOG_DEBUG("data 0x%x ctrl 0x%x", *value
, *ctrl
);
1781 /* write ack back to software dcc register
1782 * signify we have read data */
1783 if (dcrdr
& (1 << 0))
1786 mem_ap_write_buf_u16(swjdp
, (uint8_t*)&dcrdr
, 1, DCB_DCRDR
);
1793 static int cortex_a9_handle_target_request(void *priv
)
1795 struct target
*target
= priv
;
1796 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1797 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
1800 if (!target_was_examined(target
))
1802 if (!target
->dbg_msg_enabled
)
1805 if (target
->state
== TARGET_RUNNING
)
1810 retval
= cortex_a9_dcc_read(swjdp
, &data
, &ctrl
);
1811 if (retval
!= ERROR_OK
)
1814 /* check if we have data */
1815 if (ctrl
& (1 << 0))
1819 /* we assume target is quick enough */
1821 retval
= cortex_a9_dcc_read(swjdp
, &data
, &ctrl
);
1822 if (retval
!= ERROR_OK
)
1824 request
|= (data
<< 8);
1825 retval
= cortex_a9_dcc_read(swjdp
, &data
, &ctrl
);
1826 if (retval
!= ERROR_OK
)
1828 request
|= (data
<< 16);
1829 retval
= cortex_a9_dcc_read(swjdp
, &data
, &ctrl
);
1830 if (retval
!= ERROR_OK
)
1832 request
|= (data
<< 24);
1833 target_request(target
, request
);
1841 * Cortex-A9 target information and configuration
1844 static int cortex_a9_examine_first(struct target
*target
)
1846 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
1847 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
1848 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
1850 int retval
= ERROR_OK
;
1851 uint32_t didr
, ctypr
, ttypr
, cpuid
;
1853 /* We do one extra read to ensure DAP is configured,
1854 * we call ahbap_debugport_init(swjdp) instead
1856 retval
= ahbap_debugport_init(swjdp
);
1857 if (retval
!= ERROR_OK
)
1860 dap_ap_select(swjdp
, swjdp_debugap
);
1863 * FIXME: assuming omap4430
1865 * APB DBGBASE reads 0x80040000, but this points to an empty ROM table.
1866 * 0x80000000 is cpu0 coresight region
1868 if (target
->coreid
> 3) {
1869 LOG_ERROR("cortex_a9 supports up to 4 cores");
1870 return ERROR_INVALID_ARGUMENTS
;
1872 armv7a
->debug_base
= 0x80000000 |
1873 ((target
->coreid
& 0x3) << CORTEX_A9_PADDRDBG_CPU_SHIFT
);
1875 retval
= mem_ap_read_atomic_u32(swjdp
,
1876 armv7a
->debug_base
+ CPUDBG_CPUID
, &cpuid
);
1877 if (retval
!= ERROR_OK
)
1880 if ((retval
= mem_ap_read_atomic_u32(swjdp
,
1881 armv7a
->debug_base
+ CPUDBG_CPUID
, &cpuid
)) != ERROR_OK
)
1883 LOG_DEBUG("Examine %s failed", "CPUID");
1887 if ((retval
= mem_ap_read_atomic_u32(swjdp
,
1888 armv7a
->debug_base
+ CPUDBG_CTYPR
, &ctypr
)) != ERROR_OK
)
1890 LOG_DEBUG("Examine %s failed", "CTYPR");
1894 if ((retval
= mem_ap_read_atomic_u32(swjdp
,
1895 armv7a
->debug_base
+ CPUDBG_TTYPR
, &ttypr
)) != ERROR_OK
)
1897 LOG_DEBUG("Examine %s failed", "TTYPR");
1901 if ((retval
= mem_ap_read_atomic_u32(swjdp
,
1902 armv7a
->debug_base
+ CPUDBG_DIDR
, &didr
)) != ERROR_OK
)
1904 LOG_DEBUG("Examine %s failed", "DIDR");
1908 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
1909 LOG_DEBUG("ctypr = 0x%08" PRIx32
, ctypr
);
1910 LOG_DEBUG("ttypr = 0x%08" PRIx32
, ttypr
);
1911 LOG_DEBUG("didr = 0x%08" PRIx32
, didr
);
1913 armv7a
->armv4_5_common
.core_type
= ARM_MODE_MON
;
1914 retval
= cortex_a9_dpm_setup(cortex_a9
, didr
);
1915 if (retval
!= ERROR_OK
)
1918 /* Setup Breakpoint Register Pairs */
1919 cortex_a9
->brp_num
= ((didr
>> 24) & 0x0F) + 1;
1920 cortex_a9
->brp_num_context
= ((didr
>> 20) & 0x0F) + 1;
1921 cortex_a9
->brp_num_available
= cortex_a9
->brp_num
;
1922 cortex_a9
->brp_list
= calloc(cortex_a9
->brp_num
, sizeof(struct cortex_a9_brp
));
1923 // cortex_a9->brb_enabled = ????;
1924 for (i
= 0; i
< cortex_a9
->brp_num
; i
++)
1926 cortex_a9
->brp_list
[i
].used
= 0;
1927 if (i
< (cortex_a9
->brp_num
-cortex_a9
->brp_num_context
))
1928 cortex_a9
->brp_list
[i
].type
= BRP_NORMAL
;
1930 cortex_a9
->brp_list
[i
].type
= BRP_CONTEXT
;
1931 cortex_a9
->brp_list
[i
].value
= 0;
1932 cortex_a9
->brp_list
[i
].control
= 0;
1933 cortex_a9
->brp_list
[i
].BRPn
= i
;
1936 LOG_DEBUG("Configured %i hw breakpoints", cortex_a9
->brp_num
);
1938 target_set_examined(target
);
1942 static int cortex_a9_examine(struct target
*target
)
1944 int retval
= ERROR_OK
;
1946 /* don't re-probe hardware after each reset */
1947 if (!target_was_examined(target
))
1948 retval
= cortex_a9_examine_first(target
);
1950 /* Configure core debug access */
1951 if (retval
== ERROR_OK
)
1952 retval
= cortex_a9_init_debug_access(target
);
1958 * Cortex-A9 target creation and initialization
1961 static int cortex_a9_init_target(struct command_context
*cmd_ctx
,
1962 struct target
*target
)
1964 /* examine_first() does a bunch of this */
1968 static int cortex_a9_init_arch_info(struct target
*target
,
1969 struct cortex_a9_common
*cortex_a9
, struct jtag_tap
*tap
)
1971 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
1972 struct arm
*armv4_5
= &armv7a
->armv4_5_common
;
1973 struct adiv5_dap
*dap
= &armv7a
->dap
;
1975 armv7a
->armv4_5_common
.dap
= dap
;
1977 /* Setup struct cortex_a9_common */
1978 cortex_a9
->common_magic
= CORTEX_A9_COMMON_MAGIC
;
1979 armv4_5
->arch_info
= armv7a
;
1981 /* prepare JTAG information for the new target */
1982 cortex_a9
->jtag_info
.tap
= tap
;
1983 cortex_a9
->jtag_info
.scann_size
= 4;
1985 /* Leave (only) generic DAP stuff for debugport_init() */
1986 dap
->jtag_info
= &cortex_a9
->jtag_info
;
1987 dap
->memaccess_tck
= 80;
1989 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
1990 dap
->tar_autoincr_block
= (1 << 10);
1992 cortex_a9
->fast_reg_read
= 0;
1994 /* Set default value */
1995 cortex_a9
->current_address_mode
= ARM_MODE_ANY
;
1997 /* register arch-specific functions */
1998 armv7a
->examine_debug_reason
= NULL
;
2000 armv7a
->post_debug_entry
= cortex_a9_post_debug_entry
;
2002 armv7a
->pre_restore_context
= NULL
;
2003 armv7a
->armv4_5_mmu
.armv4_5_cache
.ctype
= -1;
2004 armv7a
->armv4_5_mmu
.get_ttb
= cortex_a9_get_ttb
;
2005 armv7a
->armv4_5_mmu
.read_memory
= cortex_a9_read_phys_memory
;
2006 armv7a
->armv4_5_mmu
.write_memory
= cortex_a9_write_phys_memory
;
2007 armv7a
->armv4_5_mmu
.disable_mmu_caches
= cortex_a9_disable_mmu_caches
;
2008 armv7a
->armv4_5_mmu
.enable_mmu_caches
= cortex_a9_enable_mmu_caches
;
2009 armv7a
->armv4_5_mmu
.has_tiny_pages
= 1;
2010 armv7a
->armv4_5_mmu
.mmu_enabled
= 0;
2013 // arm7_9->handle_target_request = cortex_a9_handle_target_request;
2015 /* REVISIT v7a setup should be in a v7a-specific routine */
2016 arm_init_arch_info(target
, armv4_5
);
2017 armv7a
->common_magic
= ARMV7_COMMON_MAGIC
;
2019 target_register_timer_callback(cortex_a9_handle_target_request
, 1, 1, target
);
2024 static int cortex_a9_target_create(struct target
*target
, Jim_Interp
*interp
)
2026 struct cortex_a9_common
*cortex_a9
= calloc(1, sizeof(struct cortex_a9_common
));
2028 return cortex_a9_init_arch_info(target
, cortex_a9
, target
->tap
);
2031 static int cortex_a9_get_ttb(struct target
*target
, uint32_t *result
)
2033 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
2034 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
2035 uint32_t ttb
= 0, retval
= ERROR_OK
;
2037 /* current_address_mode is set inside cortex_a9_virt2phys()
2038 where we can determine if address belongs to user or kernel */
2039 if(cortex_a9
->current_address_mode
== ARM_MODE_SVC
)
2041 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2042 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2043 0, 1, /* op1, op2 */
2044 2, 0, /* CRn, CRm */
2046 if (retval
!= ERROR_OK
)
2049 else if(cortex_a9
->current_address_mode
== ARM_MODE_USR
)
2051 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2052 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2053 0, 0, /* op1, op2 */
2054 2, 0, /* CRn, CRm */
2056 if (retval
!= ERROR_OK
)
2059 /* we don't know whose address is: user or kernel
2060 we assume that if we are in kernel mode then
2061 address belongs to kernel else if in user mode
2063 else if(armv7a
->armv4_5_common
.core_mode
== ARM_MODE_SVC
)
2065 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2066 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2067 0, 1, /* op1, op2 */
2068 2, 0, /* CRn, CRm */
2070 if (retval
!= ERROR_OK
)
2073 else if(armv7a
->armv4_5_common
.core_mode
== ARM_MODE_USR
)
2075 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2076 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2077 0, 0, /* op1, op2 */
2078 2, 0, /* CRn, CRm */
2080 if (retval
!= ERROR_OK
)
2083 /* finally we don't know whose ttb to use: user or kernel */
2085 LOG_ERROR("Don't know how to get ttb for current mode!!!");
2094 static int cortex_a9_disable_mmu_caches(struct target
*target
, int mmu
,
2095 int d_u_cache
, int i_cache
)
2097 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
2098 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
2099 uint32_t cp15_control
;
2102 /* read cp15 control register */
2103 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2104 0, 0, /* op1, op2 */
2105 1, 0, /* CRn, CRm */
2107 if (retval
!= ERROR_OK
)
2112 cp15_control
&= ~0x1U
;
2115 cp15_control
&= ~0x4U
;
2118 cp15_control
&= ~0x1000U
;
2120 retval
= armv7a
->armv4_5_common
.mcr(target
, 15,
2121 0, 0, /* op1, op2 */
2122 1, 0, /* CRn, CRm */
2127 static int cortex_a9_enable_mmu_caches(struct target
*target
, int mmu
,
2128 int d_u_cache
, int i_cache
)
2130 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
2131 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
2132 uint32_t cp15_control
;
2135 /* read cp15 control register */
2136 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2137 0, 0, /* op1, op2 */
2138 1, 0, /* CRn, CRm */
2140 if (retval
!= ERROR_OK
)
2144 cp15_control
|= 0x1U
;
2147 cp15_control
|= 0x4U
;
2150 cp15_control
|= 0x1000U
;
2152 retval
= armv7a
->armv4_5_common
.mcr(target
, 15,
2153 0, 0, /* op1, op2 */
2154 1, 0, /* CRn, CRm */
2160 static int cortex_a9_mmu(struct target
*target
, int *enabled
)
2162 if (target
->state
!= TARGET_HALTED
) {
2163 LOG_ERROR("%s: target not halted", __func__
);
2164 return ERROR_TARGET_INVALID
;
2167 *enabled
= target_to_cortex_a9(target
)->armv7a_common
.armv4_5_mmu
.mmu_enabled
;
2171 static int cortex_a9_virt2phys(struct target
*target
,
2172 uint32_t virt
, uint32_t *phys
)
2175 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
2176 // struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
2177 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
2179 /* We assume that virtual address is separated
2180 between user and kernel in Linux style:
2181 0x00000000-0xbfffffff - User space
2182 0xc0000000-0xffffffff - Kernel space */
2183 if( virt
< 0xc0000000 ) /* Linux user space */
2184 cortex_a9
->current_address_mode
= ARM_MODE_USR
;
2185 else /* Linux kernel */
2186 cortex_a9
->current_address_mode
= ARM_MODE_SVC
;
2188 int retval
= armv4_5_mmu_translate_va(target
,
2189 &armv7a
->armv4_5_mmu
, virt
, &cb
, &ret
);
2190 if (retval
!= ERROR_OK
)
2192 /* Reset the flag. We don't want someone else to use it by error */
2193 cortex_a9
->current_address_mode
= ARM_MODE_ANY
;
2199 COMMAND_HANDLER(cortex_a9_handle_cache_info_command
)
2201 struct target
*target
= get_current_target(CMD_CTX
);
2202 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
2204 return armv4_5_handle_cache_info_command(CMD_CTX
,
2205 &armv7a
->armv4_5_mmu
.armv4_5_cache
);
2209 COMMAND_HANDLER(cortex_a9_handle_dbginit_command
)
2211 struct target
*target
= get_current_target(CMD_CTX
);
2212 if (!target_was_examined(target
))
2214 LOG_ERROR("target not examined yet");
2218 return cortex_a9_init_debug_access(target
);
2221 static const struct command_registration cortex_a9_exec_command_handlers
[] = {
2223 .name
= "cache_info",
2224 .handler
= cortex_a9_handle_cache_info_command
,
2225 .mode
= COMMAND_EXEC
,
2226 .help
= "display information about target caches",
2230 .handler
= cortex_a9_handle_dbginit_command
,
2231 .mode
= COMMAND_EXEC
,
2232 .help
= "Initialize core debug",
2234 COMMAND_REGISTRATION_DONE
2236 static const struct command_registration cortex_a9_command_handlers
[] = {
2238 .chain
= arm_command_handlers
,
2241 .chain
= armv7a_command_handlers
,
2244 .name
= "cortex_a9",
2245 .mode
= COMMAND_ANY
,
2246 .help
= "Cortex-A9 command group",
2247 .chain
= cortex_a9_exec_command_handlers
,
2249 COMMAND_REGISTRATION_DONE
2252 struct target_type cortexa9_target
= {
2253 .name
= "cortex_a9",
2255 .poll
= cortex_a9_poll
,
2256 .arch_state
= armv7a_arch_state
,
2258 .target_request_data
= NULL
,
2260 .halt
= cortex_a9_halt
,
2261 .resume
= cortex_a9_resume
,
2262 .step
= cortex_a9_step
,
2264 .assert_reset
= cortex_a9_assert_reset
,
2265 .deassert_reset
= cortex_a9_deassert_reset
,
2266 .soft_reset_halt
= NULL
,
2268 /* REVISIT allow exporting VFP3 registers ... */
2269 .get_gdb_reg_list
= arm_get_gdb_reg_list
,
2271 .read_memory
= cortex_a9_read_memory
,
2272 .write_memory
= cortex_a9_write_memory
,
2273 .bulk_write_memory
= cortex_a9_bulk_write_memory
,
2275 .checksum_memory
= arm_checksum_memory
,
2276 .blank_check_memory
= arm_blank_check_memory
,
2278 .run_algorithm
= armv4_5_run_algorithm
,
2280 .add_breakpoint
= cortex_a9_add_breakpoint
,
2281 .remove_breakpoint
= cortex_a9_remove_breakpoint
,
2282 .add_watchpoint
= NULL
,
2283 .remove_watchpoint
= NULL
,
2285 .commands
= cortex_a9_command_handlers
,
2286 .target_create
= cortex_a9_target_create
,
2287 .init_target
= cortex_a9_init_target
,
2288 .examine
= cortex_a9_examine
,
2290 .read_phys_memory
= cortex_a9_read_phys_memory
,
2291 .write_phys_memory
= cortex_a9_write_phys_memory
,
2292 .mmu
= cortex_a9_mmu
,
2293 .virt2phys
= cortex_a9_virt2phys
,
Linking to existing account procedure
If you already have an account and want to add another login method
you
MUST first sign in with your existing account and
then change URL to read
https://review.openocd.org/login/?link
to get to this page again but this time it'll work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)