/***************************************************************************
 *   Copyright (C) 2005 by Dominic Rath                                    *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   Copyright (C) 2006 by Magnus Lundin                                   *
 *                                                                         *
 *   Copyright (C) 2008 by Spencer Oliver                                  *
 *   spen@spen-soft.co.uk                                                  *
 *                                                                         *
 *   Copyright (C) 2009 by Dirk Behme                                      *
 *   dirk.behme@gmail.com - copy from cortex_m3                            *
 *                                                                         *
 *   Copyright (C) 2010 Øyvind Harboe                                      *
 *   oyvind.harboe@zylin.com                                               *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.              *
 *                                                                         *
 *   Cortex-A9(tm) TRM, ARM DDI 0407F                                      *
 ***************************************************************************/
39 #include "breakpoints.h"
40 #include "cortex_a9.h"
42 #include "target_request.h"
43 #include "target_type.h"
44 #include "arm_opcodes.h"
45 #include <helper/time_support.h>
/* Forward declarations for routines referenced before their definitions. */
static int cortex_a9_poll(struct target *target);
static int cortex_a9_debug_entry(struct target *target);
static int cortex_a9_restore_context(struct target *target, bool bpwp);
static int cortex_a9_set_breakpoint(struct target *target,
		struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a9_unset_breakpoint(struct target *target,
		struct breakpoint *breakpoint);
static int cortex_a9_dap_read_coreregister_u32(struct target *target,
		uint32_t *value, int regnum);
static int cortex_a9_dap_write_coreregister_u32(struct target *target,
		uint32_t value, int regnum);
static int cortex_a9_mmu(struct target *target, int *enabled);
static int cortex_a9_virt2phys(struct target *target,
		uint32_t virt, uint32_t *phys);
static int cortex_a9_disable_mmu_caches(struct target *target, int mmu,
		int d_u_cache, int i_cache);
static int cortex_a9_enable_mmu_caches(struct target *target, int mmu,
		int d_u_cache, int i_cache);
static int cortex_a9_get_ttb(struct target *target, uint32_t *result);
/*
 * FIXME do topology discovery using the ROM; don't
 * assume this is an OMAP3.  Also, allow for multiple ARMv7-A
 * cores, with different AP numbering ... don't use a #define
 * for these numbers, use per-core armv7a state.
 */
#define swjdp_memoryap 0
#define swjdp_debugap 1
78 * Cortex-A9 Basic debug access, very low level assumes state is saved
80 static int cortex_a9_init_debug_access(struct target
*target
)
82 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
83 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
89 /* Unlocking the debug registers for modification */
90 /* The debugport might be uninitialised so try twice */
91 retval
= mem_ap_sel_write_atomic_u32(swjdp
, swjdp_debugap
,
92 armv7a
->debug_base
+ CPUDBG_LOCKACCESS
, 0xC5ACCE55);
93 if (retval
!= ERROR_OK
)
96 retval
= mem_ap_sel_write_atomic_u32(swjdp
, swjdp_debugap
,
97 armv7a
->debug_base
+ CPUDBG_LOCKACCESS
, 0xC5ACCE55);
98 if (retval
== ERROR_OK
)
100 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
103 if (retval
!= ERROR_OK
)
105 /* Clear Sticky Power Down status Bit in PRSR to enable access to
106 the registers in the Core Power Domain */
107 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
108 armv7a
->debug_base
+ CPUDBG_PRSR
, &dummy
);
109 if (retval
!= ERROR_OK
)
112 /* Enabling of instruction execution in debug mode is done in debug_entry code */
114 /* Resync breakpoint registers */
116 /* Since this is likely called from init or reset, update target state information*/
117 return cortex_a9_poll(target
);
120 /* To reduce needless round-trips, pass in a pointer to the current
121 * DSCR value. Initialize it to zero if you just need to know the
122 * value on return from this function; or DSCR_INSTR_COMP if you
123 * happen to know that no instruction is pending.
125 static int cortex_a9_exec_opcode(struct target
*target
,
126 uint32_t opcode
, uint32_t *dscr_p
)
130 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
131 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
133 dscr
= dscr_p
? *dscr_p
: 0;
135 LOG_DEBUG("exec opcode 0x%08" PRIx32
, opcode
);
137 /* Wait for InstrCompl bit to be set */
138 long long then
= timeval_ms();
139 while ((dscr
& DSCR_INSTR_COMP
) == 0)
141 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
142 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
143 if (retval
!= ERROR_OK
)
145 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32
, opcode
);
148 if (timeval_ms() > then
+ 1000)
150 LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
155 retval
= mem_ap_sel_write_u32(swjdp
, swjdp_debugap
,
156 armv7a
->debug_base
+ CPUDBG_ITR
, opcode
);
157 if (retval
!= ERROR_OK
)
163 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
164 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
165 if (retval
!= ERROR_OK
)
167 LOG_ERROR("Could not read DSCR register");
170 if (timeval_ms() > then
+ 1000)
172 LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
176 while ((dscr
& DSCR_INSTR_COMP
) == 0); /* Wait for InstrCompl bit to be set */
184 /**************************************************************************
185 Read core register with very few exec_opcode, fast but needs work_area.
186 This can cause problems with MMU active.
187 **************************************************************************/
188 static int cortex_a9_read_regs_through_mem(struct target
*target
, uint32_t address
,
191 int retval
= ERROR_OK
;
192 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
193 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
195 retval
= cortex_a9_dap_read_coreregister_u32(target
, regfile
, 0);
196 if (retval
!= ERROR_OK
)
198 retval
= cortex_a9_dap_write_coreregister_u32(target
, address
, 0);
199 if (retval
!= ERROR_OK
)
201 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL
);
202 if (retval
!= ERROR_OK
)
205 retval
= mem_ap_sel_read_buf_u32(swjdp
, swjdp_memoryap
,
206 (uint8_t *)(®file
[1]), 4*15, address
);
211 static int cortex_a9_dap_read_coreregister_u32(struct target
*target
,
212 uint32_t *value
, int regnum
)
214 int retval
= ERROR_OK
;
215 uint8_t reg
= regnum
&0xFF;
217 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
218 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
225 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
226 retval
= cortex_a9_exec_opcode(target
,
227 ARMV4_5_MCR(14, 0, reg
, 0, 5, 0),
229 if (retval
!= ERROR_OK
)
234 /* "MOV r0, r15"; then move r0 to DCCTX */
235 retval
= cortex_a9_exec_opcode(target
, 0xE1A0000F, &dscr
);
236 if (retval
!= ERROR_OK
)
238 retval
= cortex_a9_exec_opcode(target
,
239 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
241 if (retval
!= ERROR_OK
)
246 /* "MRS r0, CPSR" or "MRS r0, SPSR"
247 * then move r0 to DCCTX
249 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_MRS(0, reg
& 1), &dscr
);
250 if (retval
!= ERROR_OK
)
252 retval
= cortex_a9_exec_opcode(target
,
253 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
255 if (retval
!= ERROR_OK
)
259 /* Wait for DTRRXfull then read DTRRTX */
260 long long then
= timeval_ms();
261 while ((dscr
& DSCR_DTR_TX_FULL
) == 0)
263 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
264 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
265 if (retval
!= ERROR_OK
)
267 if (timeval_ms() > then
+ 1000)
269 LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
274 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
275 armv7a
->debug_base
+ CPUDBG_DTRTX
, value
);
276 LOG_DEBUG("read DCC 0x%08" PRIx32
, *value
);
281 static int cortex_a9_dap_write_coreregister_u32(struct target
*target
,
282 uint32_t value
, int regnum
)
284 int retval
= ERROR_OK
;
285 uint8_t Rd
= regnum
&0xFF;
287 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
288 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
290 LOG_DEBUG("register %i, value 0x%08" PRIx32
, regnum
, value
);
292 /* Check that DCCRX is not full */
293 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
294 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
295 if (retval
!= ERROR_OK
)
297 if (dscr
& DSCR_DTR_RX_FULL
)
299 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32
, dscr
);
300 /* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
301 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
303 if (retval
!= ERROR_OK
)
310 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
311 LOG_DEBUG("write DCC 0x%08" PRIx32
, value
);
312 retval
= mem_ap_sel_write_u32(swjdp
, swjdp_debugap
,
313 armv7a
->debug_base
+ CPUDBG_DTRRX
, value
);
314 if (retval
!= ERROR_OK
)
319 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
320 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_MRC(14, 0, Rd
, 0, 5, 0),
322 if (retval
!= ERROR_OK
)
327 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
330 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
332 if (retval
!= ERROR_OK
)
334 retval
= cortex_a9_exec_opcode(target
, 0xE1A0F000, &dscr
);
335 if (retval
!= ERROR_OK
)
340 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
341 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
343 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
345 if (retval
!= ERROR_OK
)
347 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_MSR_GP(0, 0xF, Rd
& 1),
349 if (retval
!= ERROR_OK
)
352 /* "Prefetch flush" after modifying execution status in CPSR */
355 retval
= cortex_a9_exec_opcode(target
,
356 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
358 if (retval
!= ERROR_OK
)
366 /* Write to memory mapped registers directly with no cache or mmu handling */
367 static int cortex_a9_dap_write_memap_register_u32(struct target
*target
, uint32_t address
, uint32_t value
)
370 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
371 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
373 retval
= mem_ap_sel_write_atomic_u32(swjdp
, swjdp_debugap
, address
, value
);
379 * Cortex-A9 implementation of Debug Programmer's Model
381 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
382 * so there's no need to poll for it before executing an instruction.
384 * NOTE that in several of these cases the "stall" mode might be useful.
385 * It'd let us queue a few operations together... prepare/finish might
386 * be the places to enable/disable that mode.
389 static inline struct cortex_a9_common
*dpm_to_a9(struct arm_dpm
*dpm
)
391 return container_of(dpm
, struct cortex_a9_common
, armv7a_common
.dpm
);
394 static int cortex_a9_write_dcc(struct cortex_a9_common
*a9
, uint32_t data
)
396 LOG_DEBUG("write DCC 0x%08" PRIx32
, data
);
397 return mem_ap_sel_write_u32(&a9
->armv7a_common
.dap
, swjdp_debugap
,
398 a9
->armv7a_common
.debug_base
+ CPUDBG_DTRRX
, data
);
401 static int cortex_a9_read_dcc(struct cortex_a9_common
*a9
, uint32_t *data
,
404 struct adiv5_dap
*swjdp
= &a9
->armv7a_common
.dap
;
405 uint32_t dscr
= DSCR_INSTR_COMP
;
411 /* Wait for DTRRXfull */
412 long long then
= timeval_ms();
413 while ((dscr
& DSCR_DTR_TX_FULL
) == 0) {
414 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
415 a9
->armv7a_common
.debug_base
+ CPUDBG_DSCR
,
417 if (retval
!= ERROR_OK
)
419 if (timeval_ms() > then
+ 1000)
421 LOG_ERROR("Timeout waiting for read dcc");
426 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
427 a9
->armv7a_common
.debug_base
+ CPUDBG_DTRTX
, data
);
428 if (retval
!= ERROR_OK
)
430 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
438 static int cortex_a9_dpm_prepare(struct arm_dpm
*dpm
)
440 struct cortex_a9_common
*a9
= dpm_to_a9(dpm
);
441 struct adiv5_dap
*swjdp
= &a9
->armv7a_common
.dap
;
445 /* set up invariant: INSTR_COMP is set after ever DPM operation */
446 long long then
= timeval_ms();
449 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
450 a9
->armv7a_common
.debug_base
+ CPUDBG_DSCR
,
452 if (retval
!= ERROR_OK
)
454 if ((dscr
& DSCR_INSTR_COMP
) != 0)
456 if (timeval_ms() > then
+ 1000)
458 LOG_ERROR("Timeout waiting for dpm prepare");
463 /* this "should never happen" ... */
464 if (dscr
& DSCR_DTR_RX_FULL
) {
465 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32
, dscr
);
467 retval
= cortex_a9_exec_opcode(
468 a9
->armv7a_common
.armv4_5_common
.target
,
469 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
471 if (retval
!= ERROR_OK
)
478 static int cortex_a9_dpm_finish(struct arm_dpm
*dpm
)
480 /* REVISIT what could be done here? */
484 static int cortex_a9_instr_write_data_dcc(struct arm_dpm
*dpm
,
485 uint32_t opcode
, uint32_t data
)
487 struct cortex_a9_common
*a9
= dpm_to_a9(dpm
);
489 uint32_t dscr
= DSCR_INSTR_COMP
;
491 retval
= cortex_a9_write_dcc(a9
, data
);
492 if (retval
!= ERROR_OK
)
495 return cortex_a9_exec_opcode(
496 a9
->armv7a_common
.armv4_5_common
.target
,
501 static int cortex_a9_instr_write_data_r0(struct arm_dpm
*dpm
,
502 uint32_t opcode
, uint32_t data
)
504 struct cortex_a9_common
*a9
= dpm_to_a9(dpm
);
505 uint32_t dscr
= DSCR_INSTR_COMP
;
508 retval
= cortex_a9_write_dcc(a9
, data
);
509 if (retval
!= ERROR_OK
)
512 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
513 retval
= cortex_a9_exec_opcode(
514 a9
->armv7a_common
.armv4_5_common
.target
,
515 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
517 if (retval
!= ERROR_OK
)
520 /* then the opcode, taking data from R0 */
521 retval
= cortex_a9_exec_opcode(
522 a9
->armv7a_common
.armv4_5_common
.target
,
529 static int cortex_a9_instr_cpsr_sync(struct arm_dpm
*dpm
)
531 struct target
*target
= dpm
->arm
->target
;
532 uint32_t dscr
= DSCR_INSTR_COMP
;
534 /* "Prefetch flush" after modifying execution status in CPSR */
535 return cortex_a9_exec_opcode(target
,
536 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
540 static int cortex_a9_instr_read_data_dcc(struct arm_dpm
*dpm
,
541 uint32_t opcode
, uint32_t *data
)
543 struct cortex_a9_common
*a9
= dpm_to_a9(dpm
);
545 uint32_t dscr
= DSCR_INSTR_COMP
;
547 /* the opcode, writing data to DCC */
548 retval
= cortex_a9_exec_opcode(
549 a9
->armv7a_common
.armv4_5_common
.target
,
552 if (retval
!= ERROR_OK
)
555 return cortex_a9_read_dcc(a9
, data
, &dscr
);
559 static int cortex_a9_instr_read_data_r0(struct arm_dpm
*dpm
,
560 uint32_t opcode
, uint32_t *data
)
562 struct cortex_a9_common
*a9
= dpm_to_a9(dpm
);
563 uint32_t dscr
= DSCR_INSTR_COMP
;
566 /* the opcode, writing data to R0 */
567 retval
= cortex_a9_exec_opcode(
568 a9
->armv7a_common
.armv4_5_common
.target
,
571 if (retval
!= ERROR_OK
)
574 /* write R0 to DCC */
575 retval
= cortex_a9_exec_opcode(
576 a9
->armv7a_common
.armv4_5_common
.target
,
577 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
579 if (retval
!= ERROR_OK
)
582 return cortex_a9_read_dcc(a9
, data
, &dscr
);
585 static int cortex_a9_bpwp_enable(struct arm_dpm
*dpm
, unsigned index_t
,
586 uint32_t addr
, uint32_t control
)
588 struct cortex_a9_common
*a9
= dpm_to_a9(dpm
);
589 uint32_t vr
= a9
->armv7a_common
.debug_base
;
590 uint32_t cr
= a9
->armv7a_common
.debug_base
;
594 case 0 ... 15: /* breakpoints */
595 vr
+= CPUDBG_BVR_BASE
;
596 cr
+= CPUDBG_BCR_BASE
;
598 case 16 ... 31: /* watchpoints */
599 vr
+= CPUDBG_WVR_BASE
;
600 cr
+= CPUDBG_WCR_BASE
;
609 LOG_DEBUG("A9: bpwp enable, vr %08x cr %08x",
610 (unsigned) vr
, (unsigned) cr
);
612 retval
= cortex_a9_dap_write_memap_register_u32(dpm
->arm
->target
,
614 if (retval
!= ERROR_OK
)
616 retval
= cortex_a9_dap_write_memap_register_u32(dpm
->arm
->target
,
621 static int cortex_a9_bpwp_disable(struct arm_dpm
*dpm
, unsigned index_t
)
623 struct cortex_a9_common
*a9
= dpm_to_a9(dpm
);
628 cr
= a9
->armv7a_common
.debug_base
+ CPUDBG_BCR_BASE
;
631 cr
= a9
->armv7a_common
.debug_base
+ CPUDBG_WCR_BASE
;
639 LOG_DEBUG("A9: bpwp disable, cr %08x", (unsigned) cr
);
641 /* clear control register */
642 return cortex_a9_dap_write_memap_register_u32(dpm
->arm
->target
, cr
, 0);
645 static int cortex_a9_dpm_setup(struct cortex_a9_common
*a9
, uint32_t didr
)
647 struct arm_dpm
*dpm
= &a9
->armv7a_common
.dpm
;
650 dpm
->arm
= &a9
->armv7a_common
.armv4_5_common
;
653 dpm
->prepare
= cortex_a9_dpm_prepare
;
654 dpm
->finish
= cortex_a9_dpm_finish
;
656 dpm
->instr_write_data_dcc
= cortex_a9_instr_write_data_dcc
;
657 dpm
->instr_write_data_r0
= cortex_a9_instr_write_data_r0
;
658 dpm
->instr_cpsr_sync
= cortex_a9_instr_cpsr_sync
;
660 dpm
->instr_read_data_dcc
= cortex_a9_instr_read_data_dcc
;
661 dpm
->instr_read_data_r0
= cortex_a9_instr_read_data_r0
;
663 dpm
->bpwp_enable
= cortex_a9_bpwp_enable
;
664 dpm
->bpwp_disable
= cortex_a9_bpwp_disable
;
666 retval
= arm_dpm_setup(dpm
);
667 if (retval
== ERROR_OK
)
668 retval
= arm_dpm_initialize(dpm
);
675 * Cortex-A9 Run control
678 static int cortex_a9_poll(struct target
*target
)
680 int retval
= ERROR_OK
;
682 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
683 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
684 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
685 enum target_state prev_target_state
= target
->state
;
687 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
688 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
689 if (retval
!= ERROR_OK
)
693 cortex_a9
->cpudbg_dscr
= dscr
;
695 if (DSCR_RUN_MODE(dscr
) == (DSCR_CORE_HALTED
| DSCR_CORE_RESTARTED
))
697 if (prev_target_state
!= TARGET_HALTED
)
699 /* We have a halting debug event */
700 LOG_DEBUG("Target halted");
701 target
->state
= TARGET_HALTED
;
702 if ((prev_target_state
== TARGET_RUNNING
)
703 || (prev_target_state
== TARGET_RESET
))
705 retval
= cortex_a9_debug_entry(target
);
706 if (retval
!= ERROR_OK
)
709 target_call_event_callbacks(target
,
710 TARGET_EVENT_HALTED
);
712 if (prev_target_state
== TARGET_DEBUG_RUNNING
)
716 retval
= cortex_a9_debug_entry(target
);
717 if (retval
!= ERROR_OK
)
720 target_call_event_callbacks(target
,
721 TARGET_EVENT_DEBUG_HALTED
);
725 else if (DSCR_RUN_MODE(dscr
) == DSCR_CORE_RESTARTED
)
727 target
->state
= TARGET_RUNNING
;
731 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32
, dscr
);
732 target
->state
= TARGET_UNKNOWN
;
738 static int cortex_a9_halt(struct target
*target
)
740 int retval
= ERROR_OK
;
742 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
743 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
746 * Tell the core to be halted by writing DRCR with 0x1
747 * and then wait for the core to be halted.
749 retval
= mem_ap_sel_write_atomic_u32(swjdp
, swjdp_debugap
,
750 armv7a
->debug_base
+ CPUDBG_DRCR
, DRCR_HALT
);
751 if (retval
!= ERROR_OK
)
755 * enter halting debug mode
757 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
758 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
759 if (retval
!= ERROR_OK
)
762 retval
= mem_ap_sel_write_atomic_u32(swjdp
, swjdp_debugap
,
763 armv7a
->debug_base
+ CPUDBG_DSCR
, dscr
| DSCR_HALT_DBG_MODE
);
764 if (retval
!= ERROR_OK
)
767 long long then
= timeval_ms();
770 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
771 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
772 if (retval
!= ERROR_OK
)
774 if ((dscr
& DSCR_CORE_HALTED
) != 0)
778 if (timeval_ms() > then
+ 1000)
780 LOG_ERROR("Timeout waiting for halt");
785 target
->debug_reason
= DBG_REASON_DBGRQ
;
790 static int cortex_a9_resume(struct target
*target
, int current
,
791 uint32_t address
, int handle_breakpoints
, int debug_execution
)
793 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
794 struct arm
*armv4_5
= &armv7a
->armv4_5_common
;
795 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
798 // struct breakpoint *breakpoint = NULL;
799 uint32_t resume_pc
, dscr
;
801 if (!debug_execution
)
802 target_free_all_working_areas(target
);
807 /* Disable interrupts */
808 /* We disable interrupts in the PRIMASK register instead of
809 * masking with C_MASKINTS,
810 * This is probably the same issue as Cortex-M3 Errata 377493:
811 * C_MASKINTS in parallel with disabled interrupts can cause
812 * local faults to not be taken. */
813 buf_set_u32(armv7m
->core_cache
->reg_list
[ARMV7M_PRIMASK
].value
, 0, 32, 1);
814 armv7m
->core_cache
->reg_list
[ARMV7M_PRIMASK
].dirty
= 1;
815 armv7m
->core_cache
->reg_list
[ARMV7M_PRIMASK
].valid
= 1;
817 /* Make sure we are in Thumb mode */
818 buf_set_u32(armv7m
->core_cache
->reg_list
[ARMV7M_xPSR
].value
, 0, 32,
819 buf_get_u32(armv7m
->core_cache
->reg_list
[ARMV7M_xPSR
].value
, 0, 32) | (1 << 24));
820 armv7m
->core_cache
->reg_list
[ARMV7M_xPSR
].dirty
= 1;
821 armv7m
->core_cache
->reg_list
[ARMV7M_xPSR
].valid
= 1;
825 /* current = 1: continue on current pc, otherwise continue at <address> */
826 resume_pc
= buf_get_u32(armv4_5
->pc
->value
, 0, 32);
830 /* Make sure that the Armv7 gdb thumb fixups does not
831 * kill the return address
833 switch (armv4_5
->core_state
)
836 resume_pc
&= 0xFFFFFFFC;
838 case ARM_STATE_THUMB
:
839 case ARM_STATE_THUMB_EE
:
840 /* When the return address is loaded into PC
841 * bit 0 must be 1 to stay in Thumb state
845 case ARM_STATE_JAZELLE
:
846 LOG_ERROR("How do I resume into Jazelle state??");
849 LOG_DEBUG("resume pc = 0x%08" PRIx32
, resume_pc
);
850 buf_set_u32(armv4_5
->pc
->value
, 0, 32, resume_pc
);
851 armv4_5
->pc
->dirty
= 1;
852 armv4_5
->pc
->valid
= 1;
854 retval
= cortex_a9_restore_context(target
, handle_breakpoints
);
855 if (retval
!= ERROR_OK
)
859 /* the front-end may request us not to handle breakpoints */
860 if (handle_breakpoints
)
862 /* Single step past breakpoint at current address */
863 if ((breakpoint
= breakpoint_find(target
, resume_pc
)))
865 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint
->address
);
866 cortex_m3_unset_breakpoint(target
, breakpoint
);
867 cortex_m3_single_step_core(target
);
868 cortex_m3_set_breakpoint(target
, breakpoint
);
875 * Restart core and wait for it to be started. Clear ITRen and sticky
876 * exception flags: see ARMv7 ARM, C5.9.
878 * REVISIT: for single stepping, we probably want to
879 * disable IRQs by default, with optional override...
882 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
883 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
884 if (retval
!= ERROR_OK
)
887 if ((dscr
& DSCR_INSTR_COMP
) == 0)
888 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
890 retval
= mem_ap_sel_write_atomic_u32(swjdp
, swjdp_debugap
,
891 armv7a
->debug_base
+ CPUDBG_DSCR
, dscr
& ~DSCR_ITR_EN
);
892 if (retval
!= ERROR_OK
)
895 retval
= mem_ap_sel_write_atomic_u32(swjdp
, swjdp_debugap
,
896 armv7a
->debug_base
+ CPUDBG_DRCR
, DRCR_RESTART
| DRCR_CLEAR_EXCEPTIONS
);
897 if (retval
!= ERROR_OK
)
900 long long then
= timeval_ms();
903 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
904 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
905 if (retval
!= ERROR_OK
)
907 if ((dscr
& DSCR_CORE_RESTARTED
) != 0)
909 if (timeval_ms() > then
+ 1000)
911 LOG_ERROR("Timeout waiting for resume");
916 target
->debug_reason
= DBG_REASON_NOTHALTED
;
917 target
->state
= TARGET_RUNNING
;
919 /* registers are now invalid */
920 register_cache_invalidate(armv4_5
->core_cache
);
922 if (!debug_execution
)
924 target
->state
= TARGET_RUNNING
;
925 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
926 LOG_DEBUG("target resumed at 0x%" PRIx32
, resume_pc
);
930 target
->state
= TARGET_DEBUG_RUNNING
;
931 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
932 LOG_DEBUG("target debug resumed at 0x%" PRIx32
, resume_pc
);
938 static int cortex_a9_debug_entry(struct target
*target
)
941 uint32_t regfile
[16], cpsr
, dscr
;
942 int retval
= ERROR_OK
;
943 struct working_area
*regfile_working_area
= NULL
;
944 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
945 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
946 struct arm
*armv4_5
= &armv7a
->armv4_5_common
;
947 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
950 LOG_DEBUG("dscr = 0x%08" PRIx32
, cortex_a9
->cpudbg_dscr
);
952 /* REVISIT surely we should not re-read DSCR !! */
953 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
954 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
955 if (retval
!= ERROR_OK
)
958 /* REVISIT see A9 TRM 12.11.4 steps 2..3 -- make sure that any
959 * imprecise data aborts get discarded by issuing a Data
960 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
963 /* Enable the ITR execution once we are in debug mode */
965 retval
= mem_ap_sel_write_atomic_u32(swjdp
, swjdp_debugap
,
966 armv7a
->debug_base
+ CPUDBG_DSCR
, dscr
);
967 if (retval
!= ERROR_OK
)
970 /* Examine debug reason */
971 arm_dpm_report_dscr(&armv7a
->dpm
, cortex_a9
->cpudbg_dscr
);
973 /* save address of instruction that triggered the watchpoint? */
974 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
977 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
978 armv7a
->debug_base
+ CPUDBG_WFAR
,
980 if (retval
!= ERROR_OK
)
982 arm_dpm_report_wfar(&armv7a
->dpm
, wfar
);
985 /* REVISIT fast_reg_read is never set ... */
987 /* Examine target state and mode */
988 if (cortex_a9
->fast_reg_read
)
989 target_alloc_working_area(target
, 64, ®file_working_area
);
991 /* First load register acessible through core debug port*/
992 if (!regfile_working_area
)
994 retval
= arm_dpm_read_current_registers(&armv7a
->dpm
);
998 retval
= cortex_a9_read_regs_through_mem(target
,
999 regfile_working_area
->address
, regfile
);
1001 target_free_working_area(target
, regfile_working_area
);
1002 if (retval
!= ERROR_OK
)
1007 /* read Current PSR */
1008 retval
= cortex_a9_dap_read_coreregister_u32(target
, &cpsr
, 16);
1009 if (retval
!= ERROR_OK
)
1012 LOG_DEBUG("cpsr: %8.8" PRIx32
, cpsr
);
1014 arm_set_cpsr(armv4_5
, cpsr
);
1017 for (i
= 0; i
<= ARM_PC
; i
++)
1019 reg
= arm_reg_current(armv4_5
, i
);
1021 buf_set_u32(reg
->value
, 0, 32, regfile
[i
]);
1026 /* Fixup PC Resume Address */
1027 if (cpsr
& (1 << 5))
1029 // T bit set for Thumb or ThumbEE state
1030 regfile
[ARM_PC
] -= 4;
1035 regfile
[ARM_PC
] -= 8;
1039 buf_set_u32(reg
->value
, 0, 32, regfile
[ARM_PC
]);
1040 reg
->dirty
= reg
->valid
;
1044 /* TODO, Move this */
1045 uint32_t cp15_control_register
, cp15_cacr
, cp15_nacr
;
1046 cortex_a9_read_cp(target
, &cp15_control_register
, 15, 0, 1, 0, 0);
1047 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register
);
1049 cortex_a9_read_cp(target
, &cp15_cacr
, 15, 0, 1, 0, 2);
1050 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr
);
1052 cortex_a9_read_cp(target
, &cp15_nacr
, 15, 0, 1, 1, 2);
1053 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr
);
1056 /* Are we in an exception handler */
1057 // armv4_5->exception_number = 0;
1058 if (armv7a
->post_debug_entry
)
1060 retval
= armv7a
->post_debug_entry(target
);
1061 if (retval
!= ERROR_OK
)
1068 static int cortex_a9_post_debug_entry(struct target
*target
)
1070 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
1071 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
1074 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1075 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
1076 0, 0, /* op1, op2 */
1077 1, 0, /* CRn, CRm */
1078 &cortex_a9
->cp15_control_reg
);
1079 if (retval
!= ERROR_OK
)
1081 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32
, cortex_a9
->cp15_control_reg
);
1083 if (armv7a
->armv4_5_mmu
.armv4_5_cache
.ctype
== -1)
1085 uint32_t cache_type_reg
;
1087 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
1088 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
1089 0, 1, /* op1, op2 */
1090 0, 0, /* CRn, CRm */
1092 if (retval
!= ERROR_OK
)
1094 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg
);
1096 /* FIXME the armv4_4 cache info DOES NOT APPLY to Cortex-A9 */
1097 armv4_5_identify_cache(cache_type_reg
,
1098 &armv7a
->armv4_5_mmu
.armv4_5_cache
);
1101 armv7a
->armv4_5_mmu
.mmu_enabled
=
1102 (cortex_a9
->cp15_control_reg
& 0x1U
) ? 1 : 0;
1103 armv7a
->armv4_5_mmu
.armv4_5_cache
.d_u_cache_enabled
=
1104 (cortex_a9
->cp15_control_reg
& 0x4U
) ? 1 : 0;
1105 armv7a
->armv4_5_mmu
.armv4_5_cache
.i_cache_enabled
=
1106 (cortex_a9
->cp15_control_reg
& 0x1000U
) ? 1 : 0;
1111 static int cortex_a9_step(struct target
*target
, int current
, uint32_t address
,
1112 int handle_breakpoints
)
1114 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1115 struct arm
*armv4_5
= &armv7a
->armv4_5_common
;
1116 struct breakpoint
*breakpoint
= NULL
;
1117 struct breakpoint stepbreakpoint
;
1121 if (target
->state
!= TARGET_HALTED
)
1123 LOG_WARNING("target not halted");
1124 return ERROR_TARGET_NOT_HALTED
;
1127 /* current = 1: continue on current pc, otherwise continue at <address> */
1131 buf_set_u32(r
->value
, 0, 32, address
);
1135 address
= buf_get_u32(r
->value
, 0, 32);
1138 /* The front-end may request us not to handle breakpoints.
1139 * But since Cortex-A9 uses breakpoint for single step,
1140 * we MUST handle breakpoints.
1142 handle_breakpoints
= 1;
1143 if (handle_breakpoints
) {
1144 breakpoint
= breakpoint_find(target
, address
);
1146 cortex_a9_unset_breakpoint(target
, breakpoint
);
1149 /* Setup single step breakpoint */
1150 stepbreakpoint
.address
= address
;
1151 stepbreakpoint
.length
= (armv4_5
->core_state
== ARM_STATE_THUMB
)
1153 stepbreakpoint
.type
= BKPT_HARD
;
1154 stepbreakpoint
.set
= 0;
1156 /* Break on IVA mismatch */
1157 cortex_a9_set_breakpoint(target
, &stepbreakpoint
, 0x04);
1159 target
->debug_reason
= DBG_REASON_SINGLESTEP
;
1161 retval
= cortex_a9_resume(target
, 1, address
, 0, 0);
1162 if (retval
!= ERROR_OK
)
1165 long long then
= timeval_ms();
1166 while (target
->state
!= TARGET_HALTED
)
1168 retval
= cortex_a9_poll(target
);
1169 if (retval
!= ERROR_OK
)
1171 if (timeval_ms() > then
+ 1000)
1173 LOG_ERROR("timeout waiting for target halt");
1178 cortex_a9_unset_breakpoint(target
, &stepbreakpoint
);
1180 target
->debug_reason
= DBG_REASON_BREAKPOINT
;
1183 cortex_a9_set_breakpoint(target
, breakpoint
, 0);
1185 if (target
->state
!= TARGET_HALTED
)
1186 LOG_DEBUG("target stepped");
1191 static int cortex_a9_restore_context(struct target
*target
, bool bpwp
)
1193 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1197 if (armv7a
->pre_restore_context
)
1198 armv7a
->pre_restore_context(target
);
1200 return arm_dpm_write_dirty_registers(&armv7a
->dpm
, bpwp
);
1205 * Cortex-A9 Breakpoint and watchpoint functions
1208 /* Setup hardware Breakpoint Register Pair */
1209 static int cortex_a9_set_breakpoint(struct target
*target
,
1210 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1215 uint8_t byte_addr_select
= 0x0F;
1216 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
1217 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
1218 struct cortex_a9_brp
* brp_list
= cortex_a9
->brp_list
;
1220 if (breakpoint
->set
)
1222 LOG_WARNING("breakpoint already set");
1226 if (breakpoint
->type
== BKPT_HARD
)
1228 while (brp_list
[brp_i
].used
&& (brp_i
< cortex_a9
->brp_num
))
1230 if (brp_i
>= cortex_a9
->brp_num
)
1232 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1233 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1235 breakpoint
->set
= brp_i
+ 1;
1236 if (breakpoint
->length
== 2)
1238 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1240 control
= ((matchmode
& 0x7) << 20)
1241 | (byte_addr_select
<< 5)
1243 brp_list
[brp_i
].used
= 1;
1244 brp_list
[brp_i
].value
= (breakpoint
->address
& 0xFFFFFFFC);
1245 brp_list
[brp_i
].control
= control
;
1246 retval
= cortex_a9_dap_write_memap_register_u32(target
, armv7a
->debug_base
1247 + CPUDBG_BVR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1248 brp_list
[brp_i
].value
);
1249 if (retval
!= ERROR_OK
)
1251 retval
= cortex_a9_dap_write_memap_register_u32(target
, armv7a
->debug_base
1252 + CPUDBG_BCR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1253 brp_list
[brp_i
].control
);
1254 if (retval
!= ERROR_OK
)
1256 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%0" PRIx32
, brp_i
,
1257 brp_list
[brp_i
].control
,
1258 brp_list
[brp_i
].value
);
1260 else if (breakpoint
->type
== BKPT_SOFT
)
1263 if (breakpoint
->length
== 2)
1265 buf_set_u32(code
, 0, 32, ARMV5_T_BKPT(0x11));
1269 buf_set_u32(code
, 0, 32, ARMV5_BKPT(0x11));
1271 retval
= target
->type
->read_memory(target
,
1272 breakpoint
->address
& 0xFFFFFFFE,
1273 breakpoint
->length
, 1,
1274 breakpoint
->orig_instr
);
1275 if (retval
!= ERROR_OK
)
1277 retval
= target
->type
->write_memory(target
,
1278 breakpoint
->address
& 0xFFFFFFFE,
1279 breakpoint
->length
, 1, code
);
1280 if (retval
!= ERROR_OK
)
1282 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1288 static int cortex_a9_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1291 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
1292 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
1293 struct cortex_a9_brp
* brp_list
= cortex_a9
->brp_list
;
1295 if (!breakpoint
->set
)
1297 LOG_WARNING("breakpoint not set");
1301 if (breakpoint
->type
== BKPT_HARD
)
1303 int brp_i
= breakpoint
->set
- 1;
1304 if ((brp_i
< 0) || (brp_i
>= cortex_a9
->brp_num
))
1306 LOG_DEBUG("Invalid BRP number in breakpoint");
1309 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx32
, brp_i
,
1310 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1311 brp_list
[brp_i
].used
= 0;
1312 brp_list
[brp_i
].value
= 0;
1313 brp_list
[brp_i
].control
= 0;
1314 retval
= cortex_a9_dap_write_memap_register_u32(target
, armv7a
->debug_base
1315 + CPUDBG_BCR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1316 brp_list
[brp_i
].control
);
1317 if (retval
!= ERROR_OK
)
1319 retval
= cortex_a9_dap_write_memap_register_u32(target
, armv7a
->debug_base
1320 + CPUDBG_BVR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1321 brp_list
[brp_i
].value
);
1322 if (retval
!= ERROR_OK
)
1327 /* restore original instruction (kept in target endianness) */
1328 if (breakpoint
->length
== 4)
1330 retval
= target
->type
->write_memory(target
,
1331 breakpoint
->address
& 0xFFFFFFFE,
1332 4, 1, breakpoint
->orig_instr
);
1333 if (retval
!= ERROR_OK
)
1338 retval
= target
->type
->write_memory(target
,
1339 breakpoint
->address
& 0xFFFFFFFE,
1340 2, 1, breakpoint
->orig_instr
);
1341 if (retval
!= ERROR_OK
)
1345 breakpoint
->set
= 0;
1350 static int cortex_a9_add_breakpoint(struct target
*target
,
1351 struct breakpoint
*breakpoint
)
1353 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
1355 if ((breakpoint
->type
== BKPT_HARD
) && (cortex_a9
->brp_num_available
< 1))
1357 LOG_INFO("no hardware breakpoint available");
1358 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1361 if (breakpoint
->type
== BKPT_HARD
)
1362 cortex_a9
->brp_num_available
--;
1364 return cortex_a9_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1367 static int cortex_a9_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1369 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
1372 /* It is perfectly possible to remove breakpoints while the target is running */
1373 if (target
->state
!= TARGET_HALTED
)
1375 LOG_WARNING("target not halted");
1376 return ERROR_TARGET_NOT_HALTED
;
1380 if (breakpoint
->set
)
1382 cortex_a9_unset_breakpoint(target
, breakpoint
);
1383 if (breakpoint
->type
== BKPT_HARD
)
1384 cortex_a9
->brp_num_available
++ ;
1394 * Cortex-A9 Reset functions
1397 static int cortex_a9_assert_reset(struct target
*target
)
1399 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1403 /* FIXME when halt is requested, make it work somehow... */
1405 /* Issue some kind of warm reset. */
1406 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
)) {
1407 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1408 } else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1409 /* REVISIT handle "pulls" cases, if there's
1410 * hardware that needs them to work.
1412 jtag_add_reset(0, 1);
1414 LOG_ERROR("%s: how to reset?", target_name(target
));
1418 /* registers are now invalid */
1419 register_cache_invalidate(armv7a
->armv4_5_common
.core_cache
);
1421 target
->state
= TARGET_RESET
;
1426 static int cortex_a9_deassert_reset(struct target
*target
)
1432 /* be certain SRST is off */
1433 jtag_add_reset(0, 0);
1435 retval
= cortex_a9_poll(target
);
1436 if (retval
!= ERROR_OK
)
1439 if (target
->reset_halt
) {
1440 if (target
->state
!= TARGET_HALTED
) {
1441 LOG_WARNING("%s: ran after reset and before halt ...",
1442 target_name(target
));
1443 if ((retval
= target_halt(target
)) != ERROR_OK
)
1452 * Cortex-A9 Memory access
1454 * This is same Cortex M3 but we must also use the correct
1455 * ap number for every access.
1458 static int cortex_a9_read_phys_memory(struct target
*target
,
1459 uint32_t address
, uint32_t size
,
1460 uint32_t count
, uint8_t *buffer
)
1462 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1463 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
1464 int retval
= ERROR_INVALID_ARGUMENTS
;
1465 uint8_t apsel
= dap_ap_get_select(swjdp
);
1467 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address
, size
, count
);
1469 if (count
&& buffer
) {
1471 if ( apsel
== swjdp_memoryap
) {
1473 /* read memory through AHB-AP */
1477 retval
= mem_ap_sel_read_buf_u32(swjdp
, swjdp_memoryap
,
1478 buffer
, 4 * count
, address
);
1481 retval
= mem_ap_sel_read_buf_u16(swjdp
, swjdp_memoryap
,
1482 buffer
, 2 * count
, address
);
1485 retval
= mem_ap_sel_read_buf_u8(swjdp
, swjdp_memoryap
,
1486 buffer
, count
, address
);
1492 /* read memory through APB-AP */
1494 uint32_t saved_r0
, saved_r1
;
1495 int nbytes
= count
* size
;
1499 if (target
->state
!= TARGET_HALTED
)
1501 LOG_WARNING("target not halted");
1502 return ERROR_TARGET_NOT_HALTED
;
1505 retval
= cortex_a9_mmu(target
, &enabled
);
1506 if (retval
!= ERROR_OK
)
1511 LOG_WARNING("Reading physical memory through APB with MMU enabled is not yet implemented");
1512 return ERROR_TARGET_FAILURE
;
1515 /* save registers r0 and r1, we are going to corrupt them */
1516 retval
= cortex_a9_dap_read_coreregister_u32(target
, &saved_r0
, 0);
1517 if (retval
!= ERROR_OK
)
1520 retval
= cortex_a9_dap_read_coreregister_u32(target
, &saved_r1
, 1);
1521 if (retval
!= ERROR_OK
)
1524 retval
= cortex_a9_dap_write_coreregister_u32(target
, address
, 0);
1525 if (retval
!= ERROR_OK
)
1528 while (nbytes
> 0) {
1530 /* execute instruction LDRB r1, [r0], 1 (0xe4d01001) */
1531 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_LDRB_IP(1, 0) , NULL
);
1532 if (retval
!= ERROR_OK
)
1535 retval
= cortex_a9_dap_read_coreregister_u32(target
, &data
, 1);
1536 if (retval
!= ERROR_OK
)
1544 /* restore corrupted registers r0 and r1 */
1545 retval
= cortex_a9_dap_write_coreregister_u32(target
, saved_r0
, 0);
1546 if (retval
!= ERROR_OK
)
1549 retval
= cortex_a9_dap_write_coreregister_u32(target
, saved_r1
, 1);
1550 if (retval
!= ERROR_OK
)
1559 static int cortex_a9_read_memory(struct target
*target
, uint32_t address
,
1560 uint32_t size
, uint32_t count
, uint8_t *buffer
)
1563 uint32_t virt
, phys
;
1566 /* cortex_a9 handles unaligned memory access */
1568 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address
, size
, count
);
1569 retval
= cortex_a9_mmu(target
, &enabled
);
1570 if (retval
!= ERROR_OK
)
1576 retval
= cortex_a9_virt2phys(target
, virt
, &phys
);
1577 if (retval
!= ERROR_OK
)
1580 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt
, phys
);
1584 return cortex_a9_read_phys_memory(target
, address
, size
, count
, buffer
);
1587 static int cortex_a9_write_phys_memory(struct target
*target
,
1588 uint32_t address
, uint32_t size
,
1589 uint32_t count
, uint8_t *buffer
)
1591 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1592 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
1593 int retval
= ERROR_INVALID_ARGUMENTS
;
1595 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address
, size
, count
);
1597 if (count
&& buffer
) {
1598 uint8_t apsel
= dap_ap_get_select(swjdp
);
1600 if ( apsel
== swjdp_memoryap
) {
1602 /* write memory through AHB-AP */
1605 retval
= mem_ap_sel_write_buf_u32(swjdp
, swjdp_memoryap
,
1606 buffer
, 4 * count
, address
);
1609 retval
= mem_ap_sel_write_buf_u16(swjdp
, swjdp_memoryap
,
1610 buffer
, 2 * count
, address
);
1613 retval
= mem_ap_sel_write_buf_u8(swjdp
, swjdp_memoryap
,
1614 buffer
, count
, address
);
1620 /* write memory through APB-AP */
1622 uint32_t saved_r0
, saved_r1
;
1623 int nbytes
= count
* size
;
1627 if (target
->state
!= TARGET_HALTED
)
1629 LOG_WARNING("target not halted");
1630 return ERROR_TARGET_NOT_HALTED
;
1633 retval
= cortex_a9_mmu(target
, &enabled
);
1634 if (retval
!= ERROR_OK
)
1639 LOG_WARNING("Writing physical memory through APB with MMU enabled is not yet implemented");
1640 return ERROR_TARGET_FAILURE
;
1643 /* save registers r0 and r1, we are going to corrupt them */
1644 retval
= cortex_a9_dap_read_coreregister_u32(target
, &saved_r0
, 0);
1645 if (retval
!= ERROR_OK
)
1648 retval
= cortex_a9_dap_read_coreregister_u32(target
, &saved_r1
, 1);
1649 if (retval
!= ERROR_OK
)
1652 retval
= cortex_a9_dap_write_coreregister_u32(target
, address
, 0);
1653 if (retval
!= ERROR_OK
)
1656 while (nbytes
> 0) {
1660 retval
= cortex_a9_dap_write_coreregister_u32(target
, data
, 1);
1661 if (retval
!= ERROR_OK
)
1664 /* execute instruction STRB r1, [r0], 1 (0xe4c01001) */
1665 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_STRB_IP(1, 0) , NULL
);
1666 if (retval
!= ERROR_OK
)
1672 /* restore corrupted registers r0 and r1 */
1673 retval
= cortex_a9_dap_write_coreregister_u32(target
, saved_r0
, 0);
1674 if (retval
!= ERROR_OK
)
1677 retval
= cortex_a9_dap_write_coreregister_u32(target
, saved_r1
, 1);
1678 if (retval
!= ERROR_OK
)
1681 /* we can return here without invalidating D/I-cache because */
1682 /* access through APB maintains cache coherency */
1688 /* REVISIT this op is generic ARMv7-A/R stuff */
1689 if (retval
== ERROR_OK
&& target
->state
== TARGET_HALTED
)
1691 struct arm_dpm
*dpm
= armv7a
->armv4_5_common
.dpm
;
1693 retval
= dpm
->prepare(dpm
);
1694 if (retval
!= ERROR_OK
)
1697 /* The Cache handling will NOT work with MMU active, the
1698 * wrong addresses will be invalidated!
1700 * For both ICache and DCache, walk all cache lines in the
1701 * address range. Cortex-A9 has fixed 64 byte line length.
1703 * REVISIT per ARMv7, these may trigger watchpoints ...
1706 /* invalidate I-Cache */
1707 if (armv7a
->armv4_5_mmu
.armv4_5_cache
.i_cache_enabled
)
1709 /* ICIMVAU - Invalidate Cache single entry
1711 * MCR p15, 0, r0, c7, c5, 1
1713 for (uint32_t cacheline
= address
;
1714 cacheline
< address
+ size
* count
;
1716 retval
= dpm
->instr_write_data_r0(dpm
,
1717 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1719 if (retval
!= ERROR_OK
)
1724 /* invalidate D-Cache */
1725 if (armv7a
->armv4_5_mmu
.armv4_5_cache
.d_u_cache_enabled
)
1727 /* DCIMVAC - Invalidate data Cache line
1729 * MCR p15, 0, r0, c7, c6, 1
1731 for (uint32_t cacheline
= address
;
1732 cacheline
< address
+ size
* count
;
1734 retval
= dpm
->instr_write_data_r0(dpm
,
1735 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1737 if (retval
!= ERROR_OK
)
1742 /* (void) */ dpm
->finish(dpm
);
1748 static int cortex_a9_write_memory(struct target
*target
, uint32_t address
,
1749 uint32_t size
, uint32_t count
, uint8_t *buffer
)
1752 uint32_t virt
, phys
;
1755 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address
, size
, count
);
1756 retval
= cortex_a9_mmu(target
, &enabled
);
1757 if (retval
!= ERROR_OK
)
1763 retval
= cortex_a9_virt2phys(target
, virt
, &phys
);
1764 if (retval
!= ERROR_OK
)
1766 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt
, phys
);
1770 return cortex_a9_write_phys_memory(target
, address
, size
,
/**
 * Bulk-write hook: simply a word-sized (size = 4) memory write.
 *
 * @param target  target to write to
 * @param address virtual start address
 * @param count   number of 32-bit words
 * @param buffer  data to write, 4*count bytes
 * @return result of cortex_a9_write_memory()
 */
static int cortex_a9_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, uint8_t *buffer)
{
	return cortex_a9_write_memory(target, address, 4, count, buffer);
}
1780 static int cortex_a9_dcc_read(struct adiv5_dap
*swjdp
, uint8_t *value
, uint8_t *ctrl
)
1785 mem_ap_read_buf_u16(swjdp
, (uint8_t*)&dcrdr
, 1, DCB_DCRDR
);
1786 *ctrl
= (uint8_t)dcrdr
;
1787 *value
= (uint8_t)(dcrdr
>> 8);
1789 LOG_DEBUG("data 0x%x ctrl 0x%x", *value
, *ctrl
);
1791 /* write ack back to software dcc register
1792 * signify we have read data */
1793 if (dcrdr
& (1 << 0))
1796 mem_ap_write_buf_u16(swjdp
, (uint8_t*)&dcrdr
, 1, DCB_DCRDR
);
1803 static int cortex_a9_handle_target_request(void *priv
)
1805 struct target
*target
= priv
;
1806 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1807 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
1810 if (!target_was_examined(target
))
1812 if (!target
->dbg_msg_enabled
)
1815 if (target
->state
== TARGET_RUNNING
)
1820 retval
= cortex_a9_dcc_read(swjdp
, &data
, &ctrl
);
1821 if (retval
!= ERROR_OK
)
1824 /* check if we have data */
1825 if (ctrl
& (1 << 0))
1829 /* we assume target is quick enough */
1831 retval
= cortex_a9_dcc_read(swjdp
, &data
, &ctrl
);
1832 if (retval
!= ERROR_OK
)
1834 request
|= (data
<< 8);
1835 retval
= cortex_a9_dcc_read(swjdp
, &data
, &ctrl
);
1836 if (retval
!= ERROR_OK
)
1838 request
|= (data
<< 16);
1839 retval
= cortex_a9_dcc_read(swjdp
, &data
, &ctrl
);
1840 if (retval
!= ERROR_OK
)
1842 request
|= (data
<< 24);
1843 target_request(target
, request
);
1851 * Cortex-A9 target information and configuration
1854 static int cortex_a9_examine_first(struct target
*target
)
1856 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
1857 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
1858 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
1860 int retval
= ERROR_OK
;
1861 uint32_t didr
, ctypr
, ttypr
, cpuid
;
1863 /* We do one extra read to ensure DAP is configured,
1864 * we call ahbap_debugport_init(swjdp) instead
1866 retval
= ahbap_debugport_init(swjdp
);
1867 if (retval
!= ERROR_OK
)
1871 * FIXME: assuming omap4430
1873 * APB DBGBASE reads 0x80040000, but this points to an empty ROM table.
1874 * 0x80000000 is cpu0 coresight region
1876 if (target
->coreid
> 3) {
1877 LOG_ERROR("cortex_a9 supports up to 4 cores");
1878 return ERROR_INVALID_ARGUMENTS
;
1880 armv7a
->debug_base
= 0x80000000 |
1881 ((target
->coreid
& 0x3) << CORTEX_A9_PADDRDBG_CPU_SHIFT
);
1883 retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
1884 armv7a
->debug_base
+ CPUDBG_CPUID
, &cpuid
);
1885 if (retval
!= ERROR_OK
)
1888 if ((retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
1889 armv7a
->debug_base
+ CPUDBG_CPUID
, &cpuid
)) != ERROR_OK
)
1891 LOG_DEBUG("Examine %s failed", "CPUID");
1895 if ((retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
1896 armv7a
->debug_base
+ CPUDBG_CTYPR
, &ctypr
)) != ERROR_OK
)
1898 LOG_DEBUG("Examine %s failed", "CTYPR");
1902 if ((retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
1903 armv7a
->debug_base
+ CPUDBG_TTYPR
, &ttypr
)) != ERROR_OK
)
1905 LOG_DEBUG("Examine %s failed", "TTYPR");
1909 if ((retval
= mem_ap_sel_read_atomic_u32(swjdp
, swjdp_debugap
,
1910 armv7a
->debug_base
+ CPUDBG_DIDR
, &didr
)) != ERROR_OK
)
1912 LOG_DEBUG("Examine %s failed", "DIDR");
1916 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
1917 LOG_DEBUG("ctypr = 0x%08" PRIx32
, ctypr
);
1918 LOG_DEBUG("ttypr = 0x%08" PRIx32
, ttypr
);
1919 LOG_DEBUG("didr = 0x%08" PRIx32
, didr
);
1921 armv7a
->armv4_5_common
.core_type
= ARM_MODE_MON
;
1922 retval
= cortex_a9_dpm_setup(cortex_a9
, didr
);
1923 if (retval
!= ERROR_OK
)
1926 /* Setup Breakpoint Register Pairs */
1927 cortex_a9
->brp_num
= ((didr
>> 24) & 0x0F) + 1;
1928 cortex_a9
->brp_num_context
= ((didr
>> 20) & 0x0F) + 1;
1929 cortex_a9
->brp_num_available
= cortex_a9
->brp_num
;
1930 cortex_a9
->brp_list
= calloc(cortex_a9
->brp_num
, sizeof(struct cortex_a9_brp
));
1931 // cortex_a9->brb_enabled = ????;
1932 for (i
= 0; i
< cortex_a9
->brp_num
; i
++)
1934 cortex_a9
->brp_list
[i
].used
= 0;
1935 if (i
< (cortex_a9
->brp_num
-cortex_a9
->brp_num_context
))
1936 cortex_a9
->brp_list
[i
].type
= BRP_NORMAL
;
1938 cortex_a9
->brp_list
[i
].type
= BRP_CONTEXT
;
1939 cortex_a9
->brp_list
[i
].value
= 0;
1940 cortex_a9
->brp_list
[i
].control
= 0;
1941 cortex_a9
->brp_list
[i
].BRPn
= i
;
1944 LOG_DEBUG("Configured %i hw breakpoints", cortex_a9
->brp_num
);
1946 target_set_examined(target
);
1950 static int cortex_a9_examine(struct target
*target
)
1952 int retval
= ERROR_OK
;
1954 /* don't re-probe hardware after each reset */
1955 if (!target_was_examined(target
))
1956 retval
= cortex_a9_examine_first(target
);
1958 /* Configure core debug access */
1959 if (retval
== ERROR_OK
)
1960 retval
= cortex_a9_init_debug_access(target
);
1966 * Cortex-A9 target creation and initialization
1969 static int cortex_a9_init_target(struct command_context
*cmd_ctx
,
1970 struct target
*target
)
1972 /* examine_first() does a bunch of this */
1976 static int cortex_a9_init_arch_info(struct target
*target
,
1977 struct cortex_a9_common
*cortex_a9
, struct jtag_tap
*tap
)
1979 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
1980 struct arm
*armv4_5
= &armv7a
->armv4_5_common
;
1981 struct adiv5_dap
*dap
= &armv7a
->dap
;
1983 armv7a
->armv4_5_common
.dap
= dap
;
1985 /* Setup struct cortex_a9_common */
1986 cortex_a9
->common_magic
= CORTEX_A9_COMMON_MAGIC
;
1987 armv4_5
->arch_info
= armv7a
;
1989 /* prepare JTAG information for the new target */
1990 cortex_a9
->jtag_info
.tap
= tap
;
1991 cortex_a9
->jtag_info
.scann_size
= 4;
1993 /* Leave (only) generic DAP stuff for debugport_init() */
1994 dap
->jtag_info
= &cortex_a9
->jtag_info
;
1995 dap
->memaccess_tck
= 80;
1997 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
1998 dap
->tar_autoincr_block
= (1 << 10);
2000 cortex_a9
->fast_reg_read
= 0;
2002 /* Set default value */
2003 cortex_a9
->current_address_mode
= ARM_MODE_ANY
;
2005 /* register arch-specific functions */
2006 armv7a
->examine_debug_reason
= NULL
;
2008 armv7a
->post_debug_entry
= cortex_a9_post_debug_entry
;
2010 armv7a
->pre_restore_context
= NULL
;
2011 armv7a
->armv4_5_mmu
.armv4_5_cache
.ctype
= -1;
2012 armv7a
->armv4_5_mmu
.get_ttb
= cortex_a9_get_ttb
;
2013 armv7a
->armv4_5_mmu
.read_memory
= cortex_a9_read_phys_memory
;
2014 armv7a
->armv4_5_mmu
.write_memory
= cortex_a9_write_phys_memory
;
2015 armv7a
->armv4_5_mmu
.disable_mmu_caches
= cortex_a9_disable_mmu_caches
;
2016 armv7a
->armv4_5_mmu
.enable_mmu_caches
= cortex_a9_enable_mmu_caches
;
2017 armv7a
->armv4_5_mmu
.has_tiny_pages
= 1;
2018 armv7a
->armv4_5_mmu
.mmu_enabled
= 0;
2021 // arm7_9->handle_target_request = cortex_a9_handle_target_request;
2023 /* REVISIT v7a setup should be in a v7a-specific routine */
2024 arm_init_arch_info(target
, armv4_5
);
2025 armv7a
->common_magic
= ARMV7_COMMON_MAGIC
;
2027 target_register_timer_callback(cortex_a9_handle_target_request
, 1, 1, target
);
2032 static int cortex_a9_target_create(struct target
*target
, Jim_Interp
*interp
)
2034 struct cortex_a9_common
*cortex_a9
= calloc(1, sizeof(struct cortex_a9_common
));
2036 return cortex_a9_init_arch_info(target
, cortex_a9
, target
->tap
);
2039 static int cortex_a9_get_ttb(struct target
*target
, uint32_t *result
)
2041 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
2042 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
2043 uint32_t ttb
= 0, retval
= ERROR_OK
;
2045 /* current_address_mode is set inside cortex_a9_virt2phys()
2046 where we can determine if address belongs to user or kernel */
2047 if(cortex_a9
->current_address_mode
== ARM_MODE_SVC
)
2049 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2050 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2051 0, 1, /* op1, op2 */
2052 2, 0, /* CRn, CRm */
2054 if (retval
!= ERROR_OK
)
2057 else if(cortex_a9
->current_address_mode
== ARM_MODE_USR
)
2059 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2060 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2061 0, 0, /* op1, op2 */
2062 2, 0, /* CRn, CRm */
2064 if (retval
!= ERROR_OK
)
2067 /* we don't know whose address is: user or kernel
2068 we assume that if we are in kernel mode then
2069 address belongs to kernel else if in user mode
2071 else if(armv7a
->armv4_5_common
.core_mode
== ARM_MODE_SVC
)
2073 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2074 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2075 0, 1, /* op1, op2 */
2076 2, 0, /* CRn, CRm */
2078 if (retval
!= ERROR_OK
)
2081 else if(armv7a
->armv4_5_common
.core_mode
== ARM_MODE_USR
)
2083 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2084 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2085 0, 0, /* op1, op2 */
2086 2, 0, /* CRn, CRm */
2088 if (retval
!= ERROR_OK
)
2091 /* finally we don't know whose ttb to use: user or kernel */
2093 LOG_ERROR("Don't know how to get ttb for current mode!!!");
2102 static int cortex_a9_disable_mmu_caches(struct target
*target
, int mmu
,
2103 int d_u_cache
, int i_cache
)
2105 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
2106 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
2107 uint32_t cp15_control
;
2110 /* read cp15 control register */
2111 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2112 0, 0, /* op1, op2 */
2113 1, 0, /* CRn, CRm */
2115 if (retval
!= ERROR_OK
)
2120 cp15_control
&= ~0x1U
;
2123 cp15_control
&= ~0x4U
;
2126 cp15_control
&= ~0x1000U
;
2128 retval
= armv7a
->armv4_5_common
.mcr(target
, 15,
2129 0, 0, /* op1, op2 */
2130 1, 0, /* CRn, CRm */
2135 static int cortex_a9_enable_mmu_caches(struct target
*target
, int mmu
,
2136 int d_u_cache
, int i_cache
)
2138 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
2139 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
2140 uint32_t cp15_control
;
2143 /* read cp15 control register */
2144 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2145 0, 0, /* op1, op2 */
2146 1, 0, /* CRn, CRm */
2148 if (retval
!= ERROR_OK
)
2152 cp15_control
|= 0x1U
;
2155 cp15_control
|= 0x4U
;
2158 cp15_control
|= 0x1000U
;
2160 retval
= armv7a
->armv4_5_common
.mcr(target
, 15,
2161 0, 0, /* op1, op2 */
2162 1, 0, /* CRn, CRm */
2168 static int cortex_a9_mmu(struct target
*target
, int *enabled
)
2170 if (target
->state
!= TARGET_HALTED
) {
2171 LOG_ERROR("%s: target not halted", __func__
);
2172 return ERROR_TARGET_INVALID
;
2175 *enabled
= target_to_cortex_a9(target
)->armv7a_common
.armv4_5_mmu
.mmu_enabled
;
2179 static int cortex_a9_virt2phys(struct target
*target
,
2180 uint32_t virt
, uint32_t *phys
)
2183 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
2184 // struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
2185 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
2187 /* We assume that virtual address is separated
2188 between user and kernel in Linux style:
2189 0x00000000-0xbfffffff - User space
2190 0xc0000000-0xffffffff - Kernel space */
2191 if( virt
< 0xc0000000 ) /* Linux user space */
2192 cortex_a9
->current_address_mode
= ARM_MODE_USR
;
2193 else /* Linux kernel */
2194 cortex_a9
->current_address_mode
= ARM_MODE_SVC
;
2196 int retval
= armv4_5_mmu_translate_va(target
,
2197 &armv7a
->armv4_5_mmu
, virt
, &cb
, &ret
);
2198 if (retval
!= ERROR_OK
)
2200 /* Reset the flag. We don't want someone else to use it by error */
2201 cortex_a9
->current_address_mode
= ARM_MODE_ANY
;
2207 COMMAND_HANDLER(cortex_a9_handle_cache_info_command
)
2209 struct target
*target
= get_current_target(CMD_CTX
);
2210 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
2212 return armv4_5_handle_cache_info_command(CMD_CTX
,
2213 &armv7a
->armv4_5_mmu
.armv4_5_cache
);
2217 COMMAND_HANDLER(cortex_a9_handle_dbginit_command
)
2219 struct target
*target
= get_current_target(CMD_CTX
);
2220 if (!target_was_examined(target
))
2222 LOG_ERROR("target not examined yet");
2226 return cortex_a9_init_debug_access(target
);
2229 static const struct command_registration cortex_a9_exec_command_handlers
[] = {
2231 .name
= "cache_info",
2232 .handler
= cortex_a9_handle_cache_info_command
,
2233 .mode
= COMMAND_EXEC
,
2234 .help
= "display information about target caches",
2238 .handler
= cortex_a9_handle_dbginit_command
,
2239 .mode
= COMMAND_EXEC
,
2240 .help
= "Initialize core debug",
2242 COMMAND_REGISTRATION_DONE
2244 static const struct command_registration cortex_a9_command_handlers
[] = {
2246 .chain
= arm_command_handlers
,
2249 .chain
= armv7a_command_handlers
,
2252 .name
= "cortex_a9",
2253 .mode
= COMMAND_ANY
,
2254 .help
= "Cortex-A9 command group",
2255 .chain
= cortex_a9_exec_command_handlers
,
2257 COMMAND_REGISTRATION_DONE
2260 struct target_type cortexa9_target
= {
2261 .name
= "cortex_a9",
2263 .poll
= cortex_a9_poll
,
2264 .arch_state
= armv7a_arch_state
,
2266 .target_request_data
= NULL
,
2268 .halt
= cortex_a9_halt
,
2269 .resume
= cortex_a9_resume
,
2270 .step
= cortex_a9_step
,
2272 .assert_reset
= cortex_a9_assert_reset
,
2273 .deassert_reset
= cortex_a9_deassert_reset
,
2274 .soft_reset_halt
= NULL
,
2276 /* REVISIT allow exporting VFP3 registers ... */
2277 .get_gdb_reg_list
= arm_get_gdb_reg_list
,
2279 .read_memory
= cortex_a9_read_memory
,
2280 .write_memory
= cortex_a9_write_memory
,
2281 .bulk_write_memory
= cortex_a9_bulk_write_memory
,
2283 .checksum_memory
= arm_checksum_memory
,
2284 .blank_check_memory
= arm_blank_check_memory
,
2286 .run_algorithm
= armv4_5_run_algorithm
,
2288 .add_breakpoint
= cortex_a9_add_breakpoint
,
2289 .remove_breakpoint
= cortex_a9_remove_breakpoint
,
2290 .add_watchpoint
= NULL
,
2291 .remove_watchpoint
= NULL
,
2293 .commands
= cortex_a9_command_handlers
,
2294 .target_create
= cortex_a9_target_create
,
2295 .init_target
= cortex_a9_init_target
,
2296 .examine
= cortex_a9_examine
,
2298 .read_phys_memory
= cortex_a9_read_phys_memory
,
2299 .write_phys_memory
= cortex_a9_write_phys_memory
,
2300 .mmu
= cortex_a9_mmu
,
2301 .virt2phys
= cortex_a9_virt2phys
,
Linking to an existing account
If you already have an account and want to add another login method,
you MUST first sign in with your existing account and then change the URL to
https://review.openocd.org/login/?link
to return to this page; this time it will work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)