1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
5 * Copyright (C) 2006 by Magnus Lundin *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex R4 support *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program; if not, write to the *
38 * Free Software Foundation, Inc., *
39 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
41 * Cortex-A8(tm) TRM, ARM DDI 0344H *
42 * Cortex-A9(tm) TRM, ARM DDI 0407F *
43 * Cortex-A4(tm) TRM, ARM DDI 0363E *
44 * Cortex-A15(tm)TRM, ARM DDI 0438C *
46 ***************************************************************************/
52 #include "breakpoints.h"
55 #include "target_request.h"
56 #include "target_type.h"
57 #include "arm_opcodes.h"
58 #include <helper/time_support.h>
60 static int cortex_a_poll(struct target
*target
);
61 static int cortex_a_debug_entry(struct target
*target
);
62 static int cortex_a_restore_context(struct target
*target
, bool bpwp
);
63 static int cortex_a_set_breakpoint(struct target
*target
,
64 struct breakpoint
*breakpoint
, uint8_t matchmode
);
65 static int cortex_a_set_context_breakpoint(struct target
*target
,
66 struct breakpoint
*breakpoint
, uint8_t matchmode
);
67 static int cortex_a_set_hybrid_breakpoint(struct target
*target
,
68 struct breakpoint
*breakpoint
);
69 static int cortex_a_unset_breakpoint(struct target
*target
,
70 struct breakpoint
*breakpoint
);
71 static int cortex_a_dap_read_coreregister_u32(struct target
*target
,
72 uint32_t *value
, int regnum
);
73 static int cortex_a_dap_write_coreregister_u32(struct target
*target
,
74 uint32_t value
, int regnum
);
75 static int cortex_a_mmu(struct target
*target
, int *enabled
);
76 static int cortex_a_virt2phys(struct target
*target
,
77 uint32_t virt
, uint32_t *phys
);
78 static int cortex_a_read_apb_ab_memory(struct target
*target
,
79 uint32_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
82 /* restore cp15_control_reg at resume */
83 static int cortex_a_restore_cp15_control_reg(struct target
*target
)
85 int retval
= ERROR_OK
;
86 struct cortex_a_common
*cortex_a
= target_to_cortex_a(target
);
87 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
89 if (cortex_a
->cp15_control_reg
!= cortex_a
->cp15_control_reg_curr
) {
90 cortex_a
->cp15_control_reg_curr
= cortex_a
->cp15_control_reg
;
91 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
92 retval
= armv7a
->arm
.mcr(target
, 15,
95 cortex_a
->cp15_control_reg
);
100 /* check address before cortex_a_apb read write access with mmu on
101 * remove apb predictible data abort */
102 static int cortex_a_check_address(struct target
*target
, uint32_t address
)
104 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
105 struct cortex_a_common
*cortex_a
= target_to_cortex_a(target
);
106 uint32_t os_border
= armv7a
->armv7a_mmu
.os_border
;
107 if ((address
< os_border
) &&
108 (armv7a
->arm
.core_mode
== ARM_MODE_SVC
)) {
109 LOG_ERROR("%" PRIx32
" access in userspace and target in supervisor", address
);
112 if ((address
>= os_border
) &&
113 (cortex_a
->curr_mode
!= ARM_MODE_SVC
)) {
114 dpm_modeswitch(&armv7a
->dpm
, ARM_MODE_SVC
);
115 cortex_a
->curr_mode
= ARM_MODE_SVC
;
116 LOG_INFO("%" PRIx32
" access in kernel space and target not in supervisor",
120 if ((address
< os_border
) &&
121 (cortex_a
->curr_mode
== ARM_MODE_SVC
)) {
122 dpm_modeswitch(&armv7a
->dpm
, ARM_MODE_ANY
);
123 cortex_a
->curr_mode
= ARM_MODE_ANY
;
127 /* modify cp15_control_reg in order to enable or disable mmu for :
128 * - virt2phys address conversion
129 * - read or write memory in phys or virt address */
130 static int cortex_a_mmu_modify(struct target
*target
, int enable
)
132 struct cortex_a_common
*cortex_a
= target_to_cortex_a(target
);
133 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
134 int retval
= ERROR_OK
;
136 /* if mmu enabled at target stop and mmu not enable */
137 if (!(cortex_a
->cp15_control_reg
& 0x1U
)) {
138 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
141 if (!(cortex_a
->cp15_control_reg_curr
& 0x1U
)) {
142 cortex_a
->cp15_control_reg_curr
|= 0x1U
;
143 retval
= armv7a
->arm
.mcr(target
, 15,
146 cortex_a
->cp15_control_reg_curr
);
149 if (cortex_a
->cp15_control_reg_curr
& 0x4U
) {
150 /* data cache is active */
151 cortex_a
->cp15_control_reg_curr
&= ~0x4U
;
152 /* flush data cache armv7 function to be called */
153 if (armv7a
->armv7a_mmu
.armv7a_cache
.flush_all_data_cache
)
154 armv7a
->armv7a_mmu
.armv7a_cache
.flush_all_data_cache(target
);
156 if ((cortex_a
->cp15_control_reg_curr
& 0x1U
)) {
157 cortex_a
->cp15_control_reg_curr
&= ~0x1U
;
158 retval
= armv7a
->arm
.mcr(target
, 15,
161 cortex_a
->cp15_control_reg_curr
);
168 * Cortex-A Basic debug access, very low level assumes state is saved
170 static int cortex_a8_init_debug_access(struct target
*target
)
172 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
173 struct adiv5_dap
*swjdp
= armv7a
->arm
.dap
;
178 /* Unlocking the debug registers for modification
179 * The debugport might be uninitialised so try twice */
180 retval
= mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
,
181 armv7a
->debug_base
+ CPUDBG_LOCKACCESS
, 0xC5ACCE55);
182 if (retval
!= ERROR_OK
) {
184 retval
= mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
,
185 armv7a
->debug_base
+ CPUDBG_LOCKACCESS
, 0xC5ACCE55);
186 if (retval
== ERROR_OK
)
188 "Locking debug access failed on first, but succeeded on second try.");
195 * Cortex-A Basic debug access, very low level assumes state is saved
197 static int cortex_a_init_debug_access(struct target
*target
)
199 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
200 struct adiv5_dap
*swjdp
= armv7a
->arm
.dap
;
203 uint32_t cortex_part_num
;
204 struct cortex_a_common
*cortex_a
= target_to_cortex_a(target
);
207 cortex_part_num
= (cortex_a
->cpuid
& CORTEX_A_MIDR_PARTNUM_MASK
) >>
208 CORTEX_A_MIDR_PARTNUM_SHIFT
;
210 switch (cortex_part_num
) {
211 case CORTEX_A15_PARTNUM
:
212 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
213 armv7a
->debug_base
+ CPUDBG_OSLSR
,
215 if (retval
!= ERROR_OK
)
218 LOG_DEBUG("DBGOSLSR 0x%" PRIx32
, dbg_osreg
);
220 if (dbg_osreg
& CPUDBG_OSLAR_LK_MASK
)
221 /* Unlocking the DEBUG OS registers for modification */
222 retval
= mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
,
223 armv7a
->debug_base
+ CPUDBG_OSLAR
,
227 case CORTEX_A8_PARTNUM
:
228 case CORTEX_A9_PARTNUM
:
230 retval
= cortex_a8_init_debug_access(target
);
233 if (retval
!= ERROR_OK
)
235 /* Clear Sticky Power Down status Bit in PRSR to enable access to
236 the registers in the Core Power Domain */
237 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
238 armv7a
->debug_base
+ CPUDBG_PRSR
, &dbg_osreg
);
239 LOG_DEBUG("target->coreid %d DBGPRSR 0x%x ", target
->coreid
, dbg_osreg
);
241 if (retval
!= ERROR_OK
)
244 /* Enabling of instruction execution in debug mode is done in debug_entry code */
246 /* Resync breakpoint registers */
248 /* Since this is likely called from init or reset, update target state information*/
249 return cortex_a_poll(target
);
252 /* To reduce needless round-trips, pass in a pointer to the current
253 * DSCR value. Initialize it to zero if you just need to know the
254 * value on return from this function; or DSCR_INSTR_COMP if you
255 * happen to know that no instruction is pending.
257 static int cortex_a_exec_opcode(struct target
*target
,
258 uint32_t opcode
, uint32_t *dscr_p
)
262 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
263 struct adiv5_dap
*swjdp
= armv7a
->arm
.dap
;
265 dscr
= dscr_p
? *dscr_p
: 0;
267 LOG_DEBUG("exec opcode 0x%08" PRIx32
, opcode
);
269 /* Wait for InstrCompl bit to be set */
270 long long then
= timeval_ms();
271 while ((dscr
& DSCR_INSTR_COMP
) == 0) {
272 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
273 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
274 if (retval
!= ERROR_OK
) {
275 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32
, opcode
);
278 if (timeval_ms() > then
+ 1000) {
279 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
284 retval
= mem_ap_sel_write_u32(swjdp
, armv7a
->debug_ap
,
285 armv7a
->debug_base
+ CPUDBG_ITR
, opcode
);
286 if (retval
!= ERROR_OK
)
291 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
292 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
293 if (retval
!= ERROR_OK
) {
294 LOG_ERROR("Could not read DSCR register");
297 if (timeval_ms() > then
+ 1000) {
298 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
301 } while ((dscr
& DSCR_INSTR_COMP
) == 0); /* Wait for InstrCompl bit to be set */
309 /**************************************************************************
310 Read core register with very few exec_opcode, fast but needs work_area.
311 This can cause problems with MMU active.
312 **************************************************************************/
313 static int cortex_a_read_regs_through_mem(struct target
*target
, uint32_t address
,
316 int retval
= ERROR_OK
;
317 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
318 struct adiv5_dap
*swjdp
= armv7a
->arm
.dap
;
320 retval
= cortex_a_dap_read_coreregister_u32(target
, regfile
, 0);
321 if (retval
!= ERROR_OK
)
323 retval
= cortex_a_dap_write_coreregister_u32(target
, address
, 0);
324 if (retval
!= ERROR_OK
)
326 retval
= cortex_a_exec_opcode(target
, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL
);
327 if (retval
!= ERROR_OK
)
330 retval
= mem_ap_sel_read_buf(swjdp
, armv7a
->memory_ap
,
331 (uint8_t *)(®file
[1]), 4, 15, address
);
336 static int cortex_a_dap_read_coreregister_u32(struct target
*target
,
337 uint32_t *value
, int regnum
)
339 int retval
= ERROR_OK
;
340 uint8_t reg
= regnum
&0xFF;
342 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
343 struct adiv5_dap
*swjdp
= armv7a
->arm
.dap
;
349 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
350 retval
= cortex_a_exec_opcode(target
,
351 ARMV4_5_MCR(14, 0, reg
, 0, 5, 0),
353 if (retval
!= ERROR_OK
)
355 } else if (reg
== 15) {
356 /* "MOV r0, r15"; then move r0 to DCCTX */
357 retval
= cortex_a_exec_opcode(target
, 0xE1A0000F, &dscr
);
358 if (retval
!= ERROR_OK
)
360 retval
= cortex_a_exec_opcode(target
,
361 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
363 if (retval
!= ERROR_OK
)
366 /* "MRS r0, CPSR" or "MRS r0, SPSR"
367 * then move r0 to DCCTX
369 retval
= cortex_a_exec_opcode(target
, ARMV4_5_MRS(0, reg
& 1), &dscr
);
370 if (retval
!= ERROR_OK
)
372 retval
= cortex_a_exec_opcode(target
,
373 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
375 if (retval
!= ERROR_OK
)
379 /* Wait for DTRRXfull then read DTRRTX */
380 long long then
= timeval_ms();
381 while ((dscr
& DSCR_DTR_TX_FULL
) == 0) {
382 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
383 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
384 if (retval
!= ERROR_OK
)
386 if (timeval_ms() > then
+ 1000) {
387 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
392 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
393 armv7a
->debug_base
+ CPUDBG_DTRTX
, value
);
394 LOG_DEBUG("read DCC 0x%08" PRIx32
, *value
);
399 static int cortex_a_dap_write_coreregister_u32(struct target
*target
,
400 uint32_t value
, int regnum
)
402 int retval
= ERROR_OK
;
403 uint8_t Rd
= regnum
&0xFF;
405 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
406 struct adiv5_dap
*swjdp
= armv7a
->arm
.dap
;
408 LOG_DEBUG("register %i, value 0x%08" PRIx32
, regnum
, value
);
410 /* Check that DCCRX is not full */
411 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
412 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
413 if (retval
!= ERROR_OK
)
415 if (dscr
& DSCR_DTR_RX_FULL
) {
416 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32
, dscr
);
417 /* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
418 retval
= cortex_a_exec_opcode(target
, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
420 if (retval
!= ERROR_OK
)
427 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
428 LOG_DEBUG("write DCC 0x%08" PRIx32
, value
);
429 retval
= mem_ap_sel_write_u32(swjdp
, armv7a
->debug_ap
,
430 armv7a
->debug_base
+ CPUDBG_DTRRX
, value
);
431 if (retval
!= ERROR_OK
)
435 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
436 retval
= cortex_a_exec_opcode(target
, ARMV4_5_MRC(14, 0, Rd
, 0, 5, 0),
439 if (retval
!= ERROR_OK
)
441 } else if (Rd
== 15) {
442 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
445 retval
= cortex_a_exec_opcode(target
, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
447 if (retval
!= ERROR_OK
)
449 retval
= cortex_a_exec_opcode(target
, 0xE1A0F000, &dscr
);
450 if (retval
!= ERROR_OK
)
453 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
454 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
456 retval
= cortex_a_exec_opcode(target
, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
458 if (retval
!= ERROR_OK
)
460 retval
= cortex_a_exec_opcode(target
, ARMV4_5_MSR_GP(0, 0xF, Rd
& 1),
462 if (retval
!= ERROR_OK
)
465 /* "Prefetch flush" after modifying execution status in CPSR */
467 retval
= cortex_a_exec_opcode(target
,
468 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
470 if (retval
!= ERROR_OK
)
478 /* Write to memory mapped registers directly with no cache or mmu handling */
479 static int cortex_a_dap_write_memap_register_u32(struct target
*target
,
484 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
485 struct adiv5_dap
*swjdp
= armv7a
->arm
.dap
;
487 retval
= mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
, address
, value
);
493 * Cortex-A implementation of Debug Programmer's Model
495 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
496 * so there's no need to poll for it before executing an instruction.
498 * NOTE that in several of these cases the "stall" mode might be useful.
499 * It'd let us queue a few operations together... prepare/finish might
500 * be the places to enable/disable that mode.
503 static inline struct cortex_a_common
*dpm_to_a(struct arm_dpm
*dpm
)
505 return container_of(dpm
, struct cortex_a_common
, armv7a_common
.dpm
);
508 static int cortex_a_write_dcc(struct cortex_a_common
*a
, uint32_t data
)
510 LOG_DEBUG("write DCC 0x%08" PRIx32
, data
);
511 return mem_ap_sel_write_u32(a
->armv7a_common
.arm
.dap
,
512 a
->armv7a_common
.debug_ap
, a
->armv7a_common
.debug_base
+ CPUDBG_DTRRX
, data
);
515 static int cortex_a_read_dcc(struct cortex_a_common
*a
, uint32_t *data
,
518 struct adiv5_dap
*swjdp
= a
->armv7a_common
.arm
.dap
;
519 uint32_t dscr
= DSCR_INSTR_COMP
;
525 /* Wait for DTRRXfull */
526 long long then
= timeval_ms();
527 while ((dscr
& DSCR_DTR_TX_FULL
) == 0) {
528 retval
= mem_ap_sel_read_atomic_u32(swjdp
, a
->armv7a_common
.debug_ap
,
529 a
->armv7a_common
.debug_base
+ CPUDBG_DSCR
,
531 if (retval
!= ERROR_OK
)
533 if (timeval_ms() > then
+ 1000) {
534 LOG_ERROR("Timeout waiting for read dcc");
539 retval
= mem_ap_sel_read_atomic_u32(swjdp
, a
->armv7a_common
.debug_ap
,
540 a
->armv7a_common
.debug_base
+ CPUDBG_DTRTX
, data
);
541 if (retval
!= ERROR_OK
)
543 /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
551 static int cortex_a_dpm_prepare(struct arm_dpm
*dpm
)
553 struct cortex_a_common
*a
= dpm_to_a(dpm
);
554 struct adiv5_dap
*swjdp
= a
->armv7a_common
.arm
.dap
;
558 /* set up invariant: INSTR_COMP is set after ever DPM operation */
559 long long then
= timeval_ms();
561 retval
= mem_ap_sel_read_atomic_u32(swjdp
, a
->armv7a_common
.debug_ap
,
562 a
->armv7a_common
.debug_base
+ CPUDBG_DSCR
,
564 if (retval
!= ERROR_OK
)
566 if ((dscr
& DSCR_INSTR_COMP
) != 0)
568 if (timeval_ms() > then
+ 1000) {
569 LOG_ERROR("Timeout waiting for dpm prepare");
574 /* this "should never happen" ... */
575 if (dscr
& DSCR_DTR_RX_FULL
) {
576 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32
, dscr
);
578 retval
= cortex_a_exec_opcode(
579 a
->armv7a_common
.arm
.target
,
580 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
582 if (retval
!= ERROR_OK
)
589 static int cortex_a_dpm_finish(struct arm_dpm
*dpm
)
591 /* REVISIT what could be done here? */
595 static int cortex_a_instr_write_data_dcc(struct arm_dpm
*dpm
,
596 uint32_t opcode
, uint32_t data
)
598 struct cortex_a_common
*a
= dpm_to_a(dpm
);
600 uint32_t dscr
= DSCR_INSTR_COMP
;
602 retval
= cortex_a_write_dcc(a
, data
);
603 if (retval
!= ERROR_OK
)
606 return cortex_a_exec_opcode(
607 a
->armv7a_common
.arm
.target
,
612 static int cortex_a_instr_write_data_r0(struct arm_dpm
*dpm
,
613 uint32_t opcode
, uint32_t data
)
615 struct cortex_a_common
*a
= dpm_to_a(dpm
);
616 uint32_t dscr
= DSCR_INSTR_COMP
;
619 retval
= cortex_a_write_dcc(a
, data
);
620 if (retval
!= ERROR_OK
)
623 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
624 retval
= cortex_a_exec_opcode(
625 a
->armv7a_common
.arm
.target
,
626 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
628 if (retval
!= ERROR_OK
)
631 /* then the opcode, taking data from R0 */
632 retval
= cortex_a_exec_opcode(
633 a
->armv7a_common
.arm
.target
,
640 static int cortex_a_instr_cpsr_sync(struct arm_dpm
*dpm
)
642 struct target
*target
= dpm
->arm
->target
;
643 uint32_t dscr
= DSCR_INSTR_COMP
;
645 /* "Prefetch flush" after modifying execution status in CPSR */
646 return cortex_a_exec_opcode(target
,
647 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
651 static int cortex_a_instr_read_data_dcc(struct arm_dpm
*dpm
,
652 uint32_t opcode
, uint32_t *data
)
654 struct cortex_a_common
*a
= dpm_to_a(dpm
);
656 uint32_t dscr
= DSCR_INSTR_COMP
;
658 /* the opcode, writing data to DCC */
659 retval
= cortex_a_exec_opcode(
660 a
->armv7a_common
.arm
.target
,
663 if (retval
!= ERROR_OK
)
666 return cortex_a_read_dcc(a
, data
, &dscr
);
670 static int cortex_a_instr_read_data_r0(struct arm_dpm
*dpm
,
671 uint32_t opcode
, uint32_t *data
)
673 struct cortex_a_common
*a
= dpm_to_a(dpm
);
674 uint32_t dscr
= DSCR_INSTR_COMP
;
677 /* the opcode, writing data to R0 */
678 retval
= cortex_a_exec_opcode(
679 a
->armv7a_common
.arm
.target
,
682 if (retval
!= ERROR_OK
)
685 /* write R0 to DCC */
686 retval
= cortex_a_exec_opcode(
687 a
->armv7a_common
.arm
.target
,
688 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
690 if (retval
!= ERROR_OK
)
693 return cortex_a_read_dcc(a
, data
, &dscr
);
696 static int cortex_a_bpwp_enable(struct arm_dpm
*dpm
, unsigned index_t
,
697 uint32_t addr
, uint32_t control
)
699 struct cortex_a_common
*a
= dpm_to_a(dpm
);
700 uint32_t vr
= a
->armv7a_common
.debug_base
;
701 uint32_t cr
= a
->armv7a_common
.debug_base
;
705 case 0 ... 15: /* breakpoints */
706 vr
+= CPUDBG_BVR_BASE
;
707 cr
+= CPUDBG_BCR_BASE
;
709 case 16 ... 31: /* watchpoints */
710 vr
+= CPUDBG_WVR_BASE
;
711 cr
+= CPUDBG_WCR_BASE
;
720 LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
721 (unsigned) vr
, (unsigned) cr
);
723 retval
= cortex_a_dap_write_memap_register_u32(dpm
->arm
->target
,
725 if (retval
!= ERROR_OK
)
727 retval
= cortex_a_dap_write_memap_register_u32(dpm
->arm
->target
,
732 static int cortex_a_bpwp_disable(struct arm_dpm
*dpm
, unsigned index_t
)
734 struct cortex_a_common
*a
= dpm_to_a(dpm
);
739 cr
= a
->armv7a_common
.debug_base
+ CPUDBG_BCR_BASE
;
742 cr
= a
->armv7a_common
.debug_base
+ CPUDBG_WCR_BASE
;
750 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr
);
752 /* clear control register */
753 return cortex_a_dap_write_memap_register_u32(dpm
->arm
->target
, cr
, 0);
756 static int cortex_a_dpm_setup(struct cortex_a_common
*a
, uint32_t didr
)
758 struct arm_dpm
*dpm
= &a
->armv7a_common
.dpm
;
761 dpm
->arm
= &a
->armv7a_common
.arm
;
764 dpm
->prepare
= cortex_a_dpm_prepare
;
765 dpm
->finish
= cortex_a_dpm_finish
;
767 dpm
->instr_write_data_dcc
= cortex_a_instr_write_data_dcc
;
768 dpm
->instr_write_data_r0
= cortex_a_instr_write_data_r0
;
769 dpm
->instr_cpsr_sync
= cortex_a_instr_cpsr_sync
;
771 dpm
->instr_read_data_dcc
= cortex_a_instr_read_data_dcc
;
772 dpm
->instr_read_data_r0
= cortex_a_instr_read_data_r0
;
774 dpm
->bpwp_enable
= cortex_a_bpwp_enable
;
775 dpm
->bpwp_disable
= cortex_a_bpwp_disable
;
777 retval
= arm_dpm_setup(dpm
);
778 if (retval
== ERROR_OK
)
779 retval
= arm_dpm_initialize(dpm
);
783 static struct target
*get_cortex_a(struct target
*target
, int32_t coreid
)
785 struct target_list
*head
;
789 while (head
!= (struct target_list
*)NULL
) {
791 if ((curr
->coreid
== coreid
) && (curr
->state
== TARGET_HALTED
))
static int cortex_a_halt(struct target *target);
799 static int cortex_a_halt_smp(struct target
*target
)
802 struct target_list
*head
;
805 while (head
!= (struct target_list
*)NULL
) {
807 if ((curr
!= target
) && (curr
->state
!= TARGET_HALTED
))
808 retval
+= cortex_a_halt(curr
);
814 static int update_halt_gdb(struct target
*target
)
817 if (target
->gdb_service
&& target
->gdb_service
->core
[0] == -1) {
818 target
->gdb_service
->target
= target
;
819 target
->gdb_service
->core
[0] = target
->coreid
;
820 retval
+= cortex_a_halt_smp(target
);
826 * Cortex-A Run control
829 static int cortex_a_poll(struct target
*target
)
831 int retval
= ERROR_OK
;
833 struct cortex_a_common
*cortex_a
= target_to_cortex_a(target
);
834 struct armv7a_common
*armv7a
= &cortex_a
->armv7a_common
;
835 struct adiv5_dap
*swjdp
= armv7a
->arm
.dap
;
836 enum target_state prev_target_state
= target
->state
;
837 /* toggle to another core is done by gdb as follow */
838 /* maint packet J core_id */
840 /* the next polling trigger an halt event sent to gdb */
841 if ((target
->state
== TARGET_HALTED
) && (target
->smp
) &&
842 (target
->gdb_service
) &&
843 (target
->gdb_service
->target
== NULL
)) {
844 target
->gdb_service
->target
=
845 get_cortex_a(target
, target
->gdb_service
->core
[1]);
846 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
849 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
850 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
851 if (retval
!= ERROR_OK
)
853 cortex_a
->cpudbg_dscr
= dscr
;
855 if (DSCR_RUN_MODE(dscr
) == (DSCR_CORE_HALTED
| DSCR_CORE_RESTARTED
)) {
856 if (prev_target_state
!= TARGET_HALTED
) {
857 /* We have a halting debug event */
858 LOG_DEBUG("Target halted");
859 target
->state
= TARGET_HALTED
;
860 if ((prev_target_state
== TARGET_RUNNING
)
861 || (prev_target_state
== TARGET_UNKNOWN
)
862 || (prev_target_state
== TARGET_RESET
)) {
863 retval
= cortex_a_debug_entry(target
);
864 if (retval
!= ERROR_OK
)
867 retval
= update_halt_gdb(target
);
868 if (retval
!= ERROR_OK
)
871 target_call_event_callbacks(target
,
872 TARGET_EVENT_HALTED
);
874 if (prev_target_state
== TARGET_DEBUG_RUNNING
) {
877 retval
= cortex_a_debug_entry(target
);
878 if (retval
!= ERROR_OK
)
881 retval
= update_halt_gdb(target
);
882 if (retval
!= ERROR_OK
)
886 target_call_event_callbacks(target
,
887 TARGET_EVENT_DEBUG_HALTED
);
890 } else if (DSCR_RUN_MODE(dscr
) == DSCR_CORE_RESTARTED
)
891 target
->state
= TARGET_RUNNING
;
893 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32
, dscr
);
894 target
->state
= TARGET_UNKNOWN
;
900 static int cortex_a_halt(struct target
*target
)
902 int retval
= ERROR_OK
;
904 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
905 struct adiv5_dap
*swjdp
= armv7a
->arm
.dap
;
908 * Tell the core to be halted by writing DRCR with 0x1
909 * and then wait for the core to be halted.
911 retval
= mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
,
912 armv7a
->debug_base
+ CPUDBG_DRCR
, DRCR_HALT
);
913 if (retval
!= ERROR_OK
)
917 * enter halting debug mode
919 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
920 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
921 if (retval
!= ERROR_OK
)
924 retval
= mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
,
925 armv7a
->debug_base
+ CPUDBG_DSCR
, dscr
| DSCR_HALT_DBG_MODE
);
926 if (retval
!= ERROR_OK
)
929 long long then
= timeval_ms();
931 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
932 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
933 if (retval
!= ERROR_OK
)
935 if ((dscr
& DSCR_CORE_HALTED
) != 0)
937 if (timeval_ms() > then
+ 1000) {
938 LOG_ERROR("Timeout waiting for halt");
943 target
->debug_reason
= DBG_REASON_DBGRQ
;
948 static int cortex_a_internal_restore(struct target
*target
, int current
,
949 uint32_t *address
, int handle_breakpoints
, int debug_execution
)
951 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
952 struct arm
*arm
= &armv7a
->arm
;
956 if (!debug_execution
)
957 target_free_all_working_areas(target
);
960 if (debug_execution
) {
961 /* Disable interrupts */
962 /* We disable interrupts in the PRIMASK register instead of
963 * masking with C_MASKINTS,
964 * This is probably the same issue as Cortex-M3 Errata 377493:
965 * C_MASKINTS in parallel with disabled interrupts can cause
966 * local faults to not be taken. */
967 buf_set_u32(armv7m
->core_cache
->reg_list
[ARMV7M_PRIMASK
].value
, 0, 32, 1);
968 armv7m
->core_cache
->reg_list
[ARMV7M_PRIMASK
].dirty
= 1;
969 armv7m
->core_cache
->reg_list
[ARMV7M_PRIMASK
].valid
= 1;
971 /* Make sure we are in Thumb mode */
972 buf_set_u32(armv7m
->core_cache
->reg_list
[ARMV7M_xPSR
].value
, 0, 32,
973 buf_get_u32(armv7m
->core_cache
->reg_list
[ARMV7M_xPSR
].value
, 0,
975 armv7m
->core_cache
->reg_list
[ARMV7M_xPSR
].dirty
= 1;
976 armv7m
->core_cache
->reg_list
[ARMV7M_xPSR
].valid
= 1;
980 /* current = 1: continue on current pc, otherwise continue at <address> */
981 resume_pc
= buf_get_u32(arm
->pc
->value
, 0, 32);
983 resume_pc
= *address
;
985 *address
= resume_pc
;
987 /* Make sure that the Armv7 gdb thumb fixups does not
988 * kill the return address
990 switch (arm
->core_state
) {
992 resume_pc
&= 0xFFFFFFFC;
994 case ARM_STATE_THUMB
:
995 case ARM_STATE_THUMB_EE
:
996 /* When the return address is loaded into PC
997 * bit 0 must be 1 to stay in Thumb state
1001 case ARM_STATE_JAZELLE
:
1002 LOG_ERROR("How do I resume into Jazelle state??");
1005 LOG_DEBUG("resume pc = 0x%08" PRIx32
, resume_pc
);
1006 buf_set_u32(arm
->pc
->value
, 0, 32, resume_pc
);
1009 /* restore dpm_mode at system halt */
1010 dpm_modeswitch(&armv7a
->dpm
, ARM_MODE_ANY
);
1011 /* called it now before restoring context because it uses cpu
1012 * register r0 for restoring cp15 control register */
1013 retval
= cortex_a_restore_cp15_control_reg(target
);
1014 if (retval
!= ERROR_OK
)
1016 retval
= cortex_a_restore_context(target
, handle_breakpoints
);
1017 if (retval
!= ERROR_OK
)
1019 target
->debug_reason
= DBG_REASON_NOTHALTED
;
1020 target
->state
= TARGET_RUNNING
;
1022 /* registers are now invalid */
1023 register_cache_invalidate(arm
->core_cache
);
1026 /* the front-end may request us not to handle breakpoints */
1027 if (handle_breakpoints
) {
1028 /* Single step past breakpoint at current address */
1029 breakpoint
= breakpoint_find(target
, resume_pc
);
1031 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint
->address
);
1032 cortex_m3_unset_breakpoint(target
, breakpoint
);
1033 cortex_m3_single_step_core(target
);
1034 cortex_m3_set_breakpoint(target
, breakpoint
);
1042 static int cortex_a_internal_restart(struct target
*target
)
1044 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1045 struct arm
*arm
= &armv7a
->arm
;
1046 struct adiv5_dap
*swjdp
= arm
->dap
;
1050 * * Restart core and wait for it to be started. Clear ITRen and sticky
1051 * * exception flags: see ARMv7 ARM, C5.9.
1053 * REVISIT: for single stepping, we probably want to
1054 * disable IRQs by default, with optional override...
1057 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
1058 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
1059 if (retval
!= ERROR_OK
)
1062 if ((dscr
& DSCR_INSTR_COMP
) == 0)
1063 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
1065 retval
= mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
,
1066 armv7a
->debug_base
+ CPUDBG_DSCR
, dscr
& ~DSCR_ITR_EN
);
1067 if (retval
!= ERROR_OK
)
1070 retval
= mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
,
1071 armv7a
->debug_base
+ CPUDBG_DRCR
, DRCR_RESTART
|
1072 DRCR_CLEAR_EXCEPTIONS
);
1073 if (retval
!= ERROR_OK
)
1076 long long then
= timeval_ms();
1078 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
1079 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
1080 if (retval
!= ERROR_OK
)
1082 if ((dscr
& DSCR_CORE_RESTARTED
) != 0)
1084 if (timeval_ms() > then
+ 1000) {
1085 LOG_ERROR("Timeout waiting for resume");
1090 target
->debug_reason
= DBG_REASON_NOTHALTED
;
1091 target
->state
= TARGET_RUNNING
;
1093 /* registers are now invalid */
1094 register_cache_invalidate(arm
->core_cache
);
1099 static int cortex_a_restore_smp(struct target
*target
, int handle_breakpoints
)
1102 struct target_list
*head
;
1103 struct target
*curr
;
1105 head
= target
->head
;
1106 while (head
!= (struct target_list
*)NULL
) {
1107 curr
= head
->target
;
1108 if ((curr
!= target
) && (curr
->state
!= TARGET_RUNNING
)) {
1109 /* resume current address , not in step mode */
1110 retval
+= cortex_a_internal_restore(curr
, 1, &address
,
1111 handle_breakpoints
, 0);
1112 retval
+= cortex_a_internal_restart(curr
);
1120 static int cortex_a_resume(struct target
*target
, int current
,
1121 uint32_t address
, int handle_breakpoints
, int debug_execution
)
1124 /* dummy resume for smp toggle in order to reduce gdb impact */
1125 if ((target
->smp
) && (target
->gdb_service
->core
[1] != -1)) {
1126 /* simulate a start and halt of target */
1127 target
->gdb_service
->target
= NULL
;
1128 target
->gdb_service
->core
[0] = target
->gdb_service
->core
[1];
1129 /* fake resume at next poll we play the target core[1], see poll*/
1130 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1133 cortex_a_internal_restore(target
, current
, &address
, handle_breakpoints
, debug_execution
);
1135 target
->gdb_service
->core
[0] = -1;
1136 retval
= cortex_a_restore_smp(target
, handle_breakpoints
);
1137 if (retval
!= ERROR_OK
)
1140 cortex_a_internal_restart(target
);
1142 if (!debug_execution
) {
1143 target
->state
= TARGET_RUNNING
;
1144 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1145 LOG_DEBUG("target resumed at 0x%" PRIx32
, address
);
1147 target
->state
= TARGET_DEBUG_RUNNING
;
1148 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
1149 LOG_DEBUG("target debug resumed at 0x%" PRIx32
, address
);
1155 static int cortex_a_debug_entry(struct target
*target
)
1158 uint32_t regfile
[16], cpsr
, dscr
;
1159 int retval
= ERROR_OK
;
1160 struct working_area
*regfile_working_area
= NULL
;
1161 struct cortex_a_common
*cortex_a
= target_to_cortex_a(target
);
1162 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1163 struct arm
*arm
= &armv7a
->arm
;
1164 struct adiv5_dap
*swjdp
= armv7a
->arm
.dap
;
1167 LOG_DEBUG("dscr = 0x%08" PRIx32
, cortex_a
->cpudbg_dscr
);
1169 /* REVISIT surely we should not re-read DSCR !! */
1170 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
1171 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
1172 if (retval
!= ERROR_OK
)
1175 /* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
1176 * imprecise data aborts get discarded by issuing a Data
1177 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1180 /* Enable the ITR execution once we are in debug mode */
1181 dscr
|= DSCR_ITR_EN
;
1182 retval
= mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
,
1183 armv7a
->debug_base
+ CPUDBG_DSCR
, dscr
);
1184 if (retval
!= ERROR_OK
)
1187 /* Examine debug reason */
1188 arm_dpm_report_dscr(&armv7a
->dpm
, cortex_a
->cpudbg_dscr
);
1190 /* save address of instruction that triggered the watchpoint? */
1191 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
1194 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
1195 armv7a
->debug_base
+ CPUDBG_WFAR
,
1197 if (retval
!= ERROR_OK
)
1199 arm_dpm_report_wfar(&armv7a
->dpm
, wfar
);
1202 /* REVISIT fast_reg_read is never set ... */
1204 /* Examine target state and mode */
1205 if (cortex_a
->fast_reg_read
)
1206 target_alloc_working_area(target
, 64, ®file_working_area
);
1208 /* First load register acessible through core debug port*/
1209 if (!regfile_working_area
)
1210 retval
= arm_dpm_read_current_registers(&armv7a
->dpm
);
1212 retval
= cortex_a_read_regs_through_mem(target
,
1213 regfile_working_area
->address
, regfile
);
1215 target_free_working_area(target
, regfile_working_area
);
1216 if (retval
!= ERROR_OK
)
1219 /* read Current PSR */
1220 retval
= cortex_a_dap_read_coreregister_u32(target
, &cpsr
, 16);
1221 /* store current cpsr */
1222 if (retval
!= ERROR_OK
)
1225 LOG_DEBUG("cpsr: %8.8" PRIx32
, cpsr
);
1227 arm_set_cpsr(arm
, cpsr
);
1230 for (i
= 0; i
<= ARM_PC
; i
++) {
1231 reg
= arm_reg_current(arm
, i
);
1233 buf_set_u32(reg
->value
, 0, 32, regfile
[i
]);
1238 /* Fixup PC Resume Address */
1239 if (cpsr
& (1 << 5)) {
1240 /* T bit set for Thumb or ThumbEE state */
1241 regfile
[ARM_PC
] -= 4;
1244 regfile
[ARM_PC
] -= 8;
1248 buf_set_u32(reg
->value
, 0, 32, regfile
[ARM_PC
]);
1249 reg
->dirty
= reg
->valid
;
1253 /* TODO, Move this */
1254 uint32_t cp15_control_register
, cp15_cacr
, cp15_nacr
;
1255 cortex_a_read_cp(target
, &cp15_control_register
, 15, 0, 1, 0, 0);
1256 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register
);
1258 cortex_a_read_cp(target
, &cp15_cacr
, 15, 0, 1, 0, 2);
1259 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr
);
1261 cortex_a_read_cp(target
, &cp15_nacr
, 15, 0, 1, 1, 2);
1262 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr
);
1265 /* Are we in an exception handler */
1266 /* armv4_5->exception_number = 0; */
1267 if (armv7a
->post_debug_entry
) {
1268 retval
= armv7a
->post_debug_entry(target
);
1269 if (retval
!= ERROR_OK
)
1276 static int cortex_a_post_debug_entry(struct target
*target
)
1278 struct cortex_a_common
*cortex_a
= target_to_cortex_a(target
);
1279 struct armv7a_common
*armv7a
= &cortex_a
->armv7a_common
;
1282 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1283 retval
= armv7a
->arm
.mrc(target
, 15,
1284 0, 0, /* op1, op2 */
1285 1, 0, /* CRn, CRm */
1286 &cortex_a
->cp15_control_reg
);
1287 if (retval
!= ERROR_OK
)
1289 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32
, cortex_a
->cp15_control_reg
);
1290 cortex_a
->cp15_control_reg_curr
= cortex_a
->cp15_control_reg
;
1292 if (armv7a
->armv7a_mmu
.armv7a_cache
.ctype
== -1)
1293 armv7a_identify_cache(target
);
1295 if (armv7a
->is_armv7r
) {
1296 armv7a
->armv7a_mmu
.mmu_enabled
= 0;
1298 armv7a
->armv7a_mmu
.mmu_enabled
=
1299 (cortex_a
->cp15_control_reg
& 0x1U
) ? 1 : 0;
1301 armv7a
->armv7a_mmu
.armv7a_cache
.d_u_cache_enabled
=
1302 (cortex_a
->cp15_control_reg
& 0x4U
) ? 1 : 0;
1303 armv7a
->armv7a_mmu
.armv7a_cache
.i_cache_enabled
=
1304 (cortex_a
->cp15_control_reg
& 0x1000U
) ? 1 : 0;
1305 cortex_a
->curr_mode
= armv7a
->arm
.core_mode
;
1310 static int cortex_a_step(struct target
*target
, int current
, uint32_t address
,
1311 int handle_breakpoints
)
1313 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1314 struct arm
*arm
= &armv7a
->arm
;
1315 struct breakpoint
*breakpoint
= NULL
;
1316 struct breakpoint stepbreakpoint
;
1320 if (target
->state
!= TARGET_HALTED
) {
1321 LOG_WARNING("target not halted");
1322 return ERROR_TARGET_NOT_HALTED
;
1325 /* current = 1: continue on current pc, otherwise continue at <address> */
1328 buf_set_u32(r
->value
, 0, 32, address
);
1330 address
= buf_get_u32(r
->value
, 0, 32);
1332 /* The front-end may request us not to handle breakpoints.
1333 * But since Cortex-A uses breakpoint for single step,
1334 * we MUST handle breakpoints.
1336 handle_breakpoints
= 1;
1337 if (handle_breakpoints
) {
1338 breakpoint
= breakpoint_find(target
, address
);
1340 cortex_a_unset_breakpoint(target
, breakpoint
);
1343 /* Setup single step breakpoint */
1344 stepbreakpoint
.address
= address
;
1345 stepbreakpoint
.length
= (arm
->core_state
== ARM_STATE_THUMB
)
1347 stepbreakpoint
.type
= BKPT_HARD
;
1348 stepbreakpoint
.set
= 0;
1350 /* Break on IVA mismatch */
1351 cortex_a_set_breakpoint(target
, &stepbreakpoint
, 0x04);
1353 target
->debug_reason
= DBG_REASON_SINGLESTEP
;
1355 retval
= cortex_a_resume(target
, 1, address
, 0, 0);
1356 if (retval
!= ERROR_OK
)
1359 long long then
= timeval_ms();
1360 while (target
->state
!= TARGET_HALTED
) {
1361 retval
= cortex_a_poll(target
);
1362 if (retval
!= ERROR_OK
)
1364 if (timeval_ms() > then
+ 1000) {
1365 LOG_ERROR("timeout waiting for target halt");
1370 cortex_a_unset_breakpoint(target
, &stepbreakpoint
);
1372 target
->debug_reason
= DBG_REASON_BREAKPOINT
;
1375 cortex_a_set_breakpoint(target
, breakpoint
, 0);
1377 if (target
->state
!= TARGET_HALTED
)
1378 LOG_DEBUG("target stepped");
1383 static int cortex_a_restore_context(struct target
*target
, bool bpwp
)
1385 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1389 if (armv7a
->pre_restore_context
)
1390 armv7a
->pre_restore_context(target
);
1392 return arm_dpm_write_dirty_registers(&armv7a
->dpm
, bpwp
);
1396 * Cortex-A Breakpoint and watchpoint functions
1399 /* Setup hardware Breakpoint Register Pair */
1400 static int cortex_a_set_breakpoint(struct target
*target
,
1401 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1406 uint8_t byte_addr_select
= 0x0F;
1407 struct cortex_a_common
*cortex_a
= target_to_cortex_a(target
);
1408 struct armv7a_common
*armv7a
= &cortex_a
->armv7a_common
;
1409 struct cortex_a_brp
*brp_list
= cortex_a
->brp_list
;
1411 if (breakpoint
->set
) {
1412 LOG_WARNING("breakpoint already set");
1416 if (breakpoint
->type
== BKPT_HARD
) {
1417 while (brp_list
[brp_i
].used
&& (brp_i
< cortex_a
->brp_num
))
1419 if (brp_i
>= cortex_a
->brp_num
) {
1420 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1421 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1423 breakpoint
->set
= brp_i
+ 1;
1424 if (breakpoint
->length
== 2)
1425 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1426 control
= ((matchmode
& 0x7) << 20)
1427 | (byte_addr_select
<< 5)
1429 brp_list
[brp_i
].used
= 1;
1430 brp_list
[brp_i
].value
= (breakpoint
->address
& 0xFFFFFFFC);
1431 brp_list
[brp_i
].control
= control
;
1432 retval
= cortex_a_dap_write_memap_register_u32(target
, armv7a
->debug_base
1433 + CPUDBG_BVR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1434 brp_list
[brp_i
].value
);
1435 if (retval
!= ERROR_OK
)
1437 retval
= cortex_a_dap_write_memap_register_u32(target
, armv7a
->debug_base
1438 + CPUDBG_BCR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1439 brp_list
[brp_i
].control
);
1440 if (retval
!= ERROR_OK
)
1442 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%0" PRIx32
, brp_i
,
1443 brp_list
[brp_i
].control
,
1444 brp_list
[brp_i
].value
);
1445 } else if (breakpoint
->type
== BKPT_SOFT
) {
1447 if (breakpoint
->length
== 2)
1448 buf_set_u32(code
, 0, 32, ARMV5_T_BKPT(0x11));
1450 buf_set_u32(code
, 0, 32, ARMV5_BKPT(0x11));
1451 retval
= target_read_memory(target
,
1452 breakpoint
->address
& 0xFFFFFFFE,
1453 breakpoint
->length
, 1,
1454 breakpoint
->orig_instr
);
1455 if (retval
!= ERROR_OK
)
1457 retval
= target_write_memory(target
,
1458 breakpoint
->address
& 0xFFFFFFFE,
1459 breakpoint
->length
, 1, code
);
1460 if (retval
!= ERROR_OK
)
1462 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1468 static int cortex_a_set_context_breakpoint(struct target
*target
,
1469 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1471 int retval
= ERROR_FAIL
;
1474 uint8_t byte_addr_select
= 0x0F;
1475 struct cortex_a_common
*cortex_a
= target_to_cortex_a(target
);
1476 struct armv7a_common
*armv7a
= &cortex_a
->armv7a_common
;
1477 struct cortex_a_brp
*brp_list
= cortex_a
->brp_list
;
1479 if (breakpoint
->set
) {
1480 LOG_WARNING("breakpoint already set");
1483 /*check available context BRPs*/
1484 while ((brp_list
[brp_i
].used
||
1485 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< cortex_a
->brp_num
))
1488 if (brp_i
>= cortex_a
->brp_num
) {
1489 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1493 breakpoint
->set
= brp_i
+ 1;
1494 control
= ((matchmode
& 0x7) << 20)
1495 | (byte_addr_select
<< 5)
1497 brp_list
[brp_i
].used
= 1;
1498 brp_list
[brp_i
].value
= (breakpoint
->asid
);
1499 brp_list
[brp_i
].control
= control
;
1500 retval
= cortex_a_dap_write_memap_register_u32(target
, armv7a
->debug_base
1501 + CPUDBG_BVR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1502 brp_list
[brp_i
].value
);
1503 if (retval
!= ERROR_OK
)
1505 retval
= cortex_a_dap_write_memap_register_u32(target
, armv7a
->debug_base
1506 + CPUDBG_BCR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1507 brp_list
[brp_i
].control
);
1508 if (retval
!= ERROR_OK
)
1510 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%0" PRIx32
, brp_i
,
1511 brp_list
[brp_i
].control
,
1512 brp_list
[brp_i
].value
);
1517 static int cortex_a_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1519 int retval
= ERROR_FAIL
;
1520 int brp_1
= 0; /* holds the contextID pair */
1521 int brp_2
= 0; /* holds the IVA pair */
1522 uint32_t control_CTX
, control_IVA
;
1523 uint8_t CTX_byte_addr_select
= 0x0F;
1524 uint8_t IVA_byte_addr_select
= 0x0F;
1525 uint8_t CTX_machmode
= 0x03;
1526 uint8_t IVA_machmode
= 0x01;
1527 struct cortex_a_common
*cortex_a
= target_to_cortex_a(target
);
1528 struct armv7a_common
*armv7a
= &cortex_a
->armv7a_common
;
1529 struct cortex_a_brp
*brp_list
= cortex_a
->brp_list
;
1531 if (breakpoint
->set
) {
1532 LOG_WARNING("breakpoint already set");
1535 /*check available context BRPs*/
1536 while ((brp_list
[brp_1
].used
||
1537 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< cortex_a
->brp_num
))
1540 printf("brp(CTX) found num: %d\n", brp_1
);
1541 if (brp_1
>= cortex_a
->brp_num
) {
1542 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1546 while ((brp_list
[brp_2
].used
||
1547 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< cortex_a
->brp_num
))
1550 printf("brp(IVA) found num: %d\n", brp_2
);
1551 if (brp_2
>= cortex_a
->brp_num
) {
1552 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1556 breakpoint
->set
= brp_1
+ 1;
1557 breakpoint
->linked_BRP
= brp_2
;
1558 control_CTX
= ((CTX_machmode
& 0x7) << 20)
1561 | (CTX_byte_addr_select
<< 5)
1563 brp_list
[brp_1
].used
= 1;
1564 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1565 brp_list
[brp_1
].control
= control_CTX
;
1566 retval
= cortex_a_dap_write_memap_register_u32(target
, armv7a
->debug_base
1567 + CPUDBG_BVR_BASE
+ 4 * brp_list
[brp_1
].BRPn
,
1568 brp_list
[brp_1
].value
);
1569 if (retval
!= ERROR_OK
)
1571 retval
= cortex_a_dap_write_memap_register_u32(target
, armv7a
->debug_base
1572 + CPUDBG_BCR_BASE
+ 4 * brp_list
[brp_1
].BRPn
,
1573 brp_list
[brp_1
].control
);
1574 if (retval
!= ERROR_OK
)
1577 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1579 | (IVA_byte_addr_select
<< 5)
1581 brp_list
[brp_2
].used
= 1;
1582 brp_list
[brp_2
].value
= (breakpoint
->address
& 0xFFFFFFFC);
1583 brp_list
[brp_2
].control
= control_IVA
;
1584 retval
= cortex_a_dap_write_memap_register_u32(target
, armv7a
->debug_base
1585 + CPUDBG_BVR_BASE
+ 4 * brp_list
[brp_2
].BRPn
,
1586 brp_list
[brp_2
].value
);
1587 if (retval
!= ERROR_OK
)
1589 retval
= cortex_a_dap_write_memap_register_u32(target
, armv7a
->debug_base
1590 + CPUDBG_BCR_BASE
+ 4 * brp_list
[brp_2
].BRPn
,
1591 brp_list
[brp_2
].control
);
1592 if (retval
!= ERROR_OK
)
1598 static int cortex_a_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1601 struct cortex_a_common
*cortex_a
= target_to_cortex_a(target
);
1602 struct armv7a_common
*armv7a
= &cortex_a
->armv7a_common
;
1603 struct cortex_a_brp
*brp_list
= cortex_a
->brp_list
;
1605 if (!breakpoint
->set
) {
1606 LOG_WARNING("breakpoint not set");
1610 if (breakpoint
->type
== BKPT_HARD
) {
1611 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1612 int brp_i
= breakpoint
->set
- 1;
1613 int brp_j
= breakpoint
->linked_BRP
;
1614 if ((brp_i
< 0) || (brp_i
>= cortex_a
->brp_num
)) {
1615 LOG_DEBUG("Invalid BRP number in breakpoint");
1618 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx32
, brp_i
,
1619 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1620 brp_list
[brp_i
].used
= 0;
1621 brp_list
[brp_i
].value
= 0;
1622 brp_list
[brp_i
].control
= 0;
1623 retval
= cortex_a_dap_write_memap_register_u32(target
, armv7a
->debug_base
1624 + CPUDBG_BCR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1625 brp_list
[brp_i
].control
);
1626 if (retval
!= ERROR_OK
)
1628 retval
= cortex_a_dap_write_memap_register_u32(target
, armv7a
->debug_base
1629 + CPUDBG_BVR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1630 brp_list
[brp_i
].value
);
1631 if (retval
!= ERROR_OK
)
1633 if ((brp_j
< 0) || (brp_j
>= cortex_a
->brp_num
)) {
1634 LOG_DEBUG("Invalid BRP number in breakpoint");
1637 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx32
, brp_j
,
1638 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1639 brp_list
[brp_j
].used
= 0;
1640 brp_list
[brp_j
].value
= 0;
1641 brp_list
[brp_j
].control
= 0;
1642 retval
= cortex_a_dap_write_memap_register_u32(target
, armv7a
->debug_base
1643 + CPUDBG_BCR_BASE
+ 4 * brp_list
[brp_j
].BRPn
,
1644 brp_list
[brp_j
].control
);
1645 if (retval
!= ERROR_OK
)
1647 retval
= cortex_a_dap_write_memap_register_u32(target
, armv7a
->debug_base
1648 + CPUDBG_BVR_BASE
+ 4 * brp_list
[brp_j
].BRPn
,
1649 brp_list
[brp_j
].value
);
1650 if (retval
!= ERROR_OK
)
1652 breakpoint
->linked_BRP
= 0;
1653 breakpoint
->set
= 0;
1657 int brp_i
= breakpoint
->set
- 1;
1658 if ((brp_i
< 0) || (brp_i
>= cortex_a
->brp_num
)) {
1659 LOG_DEBUG("Invalid BRP number in breakpoint");
1662 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx32
, brp_i
,
1663 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1664 brp_list
[brp_i
].used
= 0;
1665 brp_list
[brp_i
].value
= 0;
1666 brp_list
[brp_i
].control
= 0;
1667 retval
= cortex_a_dap_write_memap_register_u32(target
, armv7a
->debug_base
1668 + CPUDBG_BCR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1669 brp_list
[brp_i
].control
);
1670 if (retval
!= ERROR_OK
)
1672 retval
= cortex_a_dap_write_memap_register_u32(target
, armv7a
->debug_base
1673 + CPUDBG_BVR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1674 brp_list
[brp_i
].value
);
1675 if (retval
!= ERROR_OK
)
1677 breakpoint
->set
= 0;
1681 /* restore original instruction (kept in target endianness) */
1682 if (breakpoint
->length
== 4) {
1683 retval
= target_write_memory(target
,
1684 breakpoint
->address
& 0xFFFFFFFE,
1685 4, 1, breakpoint
->orig_instr
);
1686 if (retval
!= ERROR_OK
)
1689 retval
= target_write_memory(target
,
1690 breakpoint
->address
& 0xFFFFFFFE,
1691 2, 1, breakpoint
->orig_instr
);
1692 if (retval
!= ERROR_OK
)
1696 breakpoint
->set
= 0;
1701 static int cortex_a_add_breakpoint(struct target
*target
,
1702 struct breakpoint
*breakpoint
)
1704 struct cortex_a_common
*cortex_a
= target_to_cortex_a(target
);
1706 if ((breakpoint
->type
== BKPT_HARD
) && (cortex_a
->brp_num_available
< 1)) {
1707 LOG_INFO("no hardware breakpoint available");
1708 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1711 if (breakpoint
->type
== BKPT_HARD
)
1712 cortex_a
->brp_num_available
--;
1714 return cortex_a_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1717 static int cortex_a_add_context_breakpoint(struct target
*target
,
1718 struct breakpoint
*breakpoint
)
1720 struct cortex_a_common
*cortex_a
= target_to_cortex_a(target
);
1722 if ((breakpoint
->type
== BKPT_HARD
) && (cortex_a
->brp_num_available
< 1)) {
1723 LOG_INFO("no hardware breakpoint available");
1724 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1727 if (breakpoint
->type
== BKPT_HARD
)
1728 cortex_a
->brp_num_available
--;
1730 return cortex_a_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1733 static int cortex_a_add_hybrid_breakpoint(struct target
*target
,
1734 struct breakpoint
*breakpoint
)
1736 struct cortex_a_common
*cortex_a
= target_to_cortex_a(target
);
1738 if ((breakpoint
->type
== BKPT_HARD
) && (cortex_a
->brp_num_available
< 1)) {
1739 LOG_INFO("no hardware breakpoint available");
1740 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1743 if (breakpoint
->type
== BKPT_HARD
)
1744 cortex_a
->brp_num_available
--;
1746 return cortex_a_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1750 static int cortex_a_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1752 struct cortex_a_common
*cortex_a
= target_to_cortex_a(target
);
1755 /* It is perfectly possible to remove breakpoints while the target is running */
1756 if (target
->state
!= TARGET_HALTED
) {
1757 LOG_WARNING("target not halted");
1758 return ERROR_TARGET_NOT_HALTED
;
1762 if (breakpoint
->set
) {
1763 cortex_a_unset_breakpoint(target
, breakpoint
);
1764 if (breakpoint
->type
== BKPT_HARD
)
1765 cortex_a
->brp_num_available
++;
1773 * Cortex-A Reset functions
1776 static int cortex_a_assert_reset(struct target
*target
)
1778 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1782 /* FIXME when halt is requested, make it work somehow... */
1784 /* Issue some kind of warm reset. */
1785 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1786 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1787 else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1788 /* REVISIT handle "pulls" cases, if there's
1789 * hardware that needs them to work.
1791 jtag_add_reset(0, 1);
1793 LOG_ERROR("%s: how to reset?", target_name(target
));
1797 /* registers are now invalid */
1798 register_cache_invalidate(armv7a
->arm
.core_cache
);
1800 target
->state
= TARGET_RESET
;
1805 static int cortex_a_deassert_reset(struct target
*target
)
1811 /* be certain SRST is off */
1812 jtag_add_reset(0, 0);
1814 retval
= cortex_a_poll(target
);
1815 if (retval
!= ERROR_OK
)
1818 if (target
->reset_halt
) {
1819 if (target
->state
!= TARGET_HALTED
) {
1820 LOG_WARNING("%s: ran after reset and before halt ...",
1821 target_name(target
));
1822 retval
= target_halt(target
);
1823 if (retval
!= ERROR_OK
)
1831 static int cortex_a_write_apb_ab_memory(struct target
*target
,
1832 uint32_t address
, uint32_t size
,
1833 uint32_t count
, const uint8_t *buffer
)
1835 /* write memory through APB-AP */
1837 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1838 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1839 struct arm
*arm
= &armv7a
->arm
;
1840 struct adiv5_dap
*swjdp
= armv7a
->arm
.dap
;
1841 int total_bytes
= count
* size
;
1843 int start_byte
= address
& 0x3;
1844 int end_byte
= (address
+ total_bytes
) & 0x3;
1847 uint8_t *tmp_buff
= NULL
;
1850 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx32
" size %" PRIu32
" count%" PRIu32
,
1851 address
, size
, count
);
1852 if (target
->state
!= TARGET_HALTED
) {
1853 LOG_WARNING("target not halted");
1854 return ERROR_TARGET_NOT_HALTED
;
1857 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1859 /* Mark register R0 as dirty, as it will be used
1860 * for transferring the data.
1861 * It will be restored automatically when exiting
1864 reg
= arm_reg_current(arm
, 0);
1867 /* clear any abort */
1868 retval
= mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
, armv7a
->debug_base
+ CPUDBG_DRCR
, 1<<2);
1869 if (retval
!= ERROR_OK
)
1872 /* This algorithm comes from either :
1873 * Cortex-A TRM Example 12-25
1874 * Cortex-R4 TRM Example 11-26
1875 * (slight differences)
1878 /* The algorithm only copies 32 bit words, so the buffer
1879 * should be expanded to include the words at either end.
1880 * The first and last words will be read first to avoid
1881 * corruption if needed.
1883 tmp_buff
= malloc(total_u32
* 4);
1885 if ((start_byte
!= 0) && (total_u32
> 1)) {
1886 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1887 * the other bytes in the word.
1889 retval
= cortex_a_read_apb_ab_memory(target
, (address
& ~0x3), 4, 1, tmp_buff
);
1890 if (retval
!= ERROR_OK
)
1891 goto error_free_buff_w
;
1894 /* If end of write is not aligned, or the write is less than 4 bytes */
1895 if ((end_byte
!= 0) ||
1896 ((total_u32
== 1) && (total_bytes
!= 4))) {
1897 /* Read the last word to avoid corruption during 32 bit write */
1898 int mem_offset
= (total_u32
-1) * 4;
1899 retval
= cortex_a_read_apb_ab_memory(target
, (address
& ~0x3) + mem_offset
, 4, 1, &tmp_buff
[mem_offset
]);
1900 if (retval
!= ERROR_OK
)
1901 goto error_free_buff_w
;
1904 /* Copy the write buffer over the top of the temporary buffer */
1905 memcpy(&tmp_buff
[start_byte
], buffer
, total_bytes
);
1907 /* We now have a 32 bit aligned buffer that can be written */
1910 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
1911 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
1912 if (retval
!= ERROR_OK
)
1913 goto error_free_buff_w
;
1915 /* Set DTR mode to Fast (2) */
1916 dscr
= (dscr
& ~DSCR_EXT_DCC_MASK
) | DSCR_EXT_DCC_FAST_MODE
;
1917 retval
= mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
,
1918 armv7a
->debug_base
+ CPUDBG_DSCR
, dscr
);
1919 if (retval
!= ERROR_OK
)
1920 goto error_free_buff_w
;
1922 /* Copy the destination address into R0 */
1923 /* - pend an instruction MRC p14, 0, R0, c5, c0 */
1924 retval
= mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
,
1925 armv7a
->debug_base
+ CPUDBG_ITR
, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
1926 if (retval
!= ERROR_OK
)
1927 goto error_unset_dtr_w
;
1928 /* Write address into DTRRX, which triggers previous instruction */
1929 retval
= mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
,
1930 armv7a
->debug_base
+ CPUDBG_DTRRX
, address
& (~0x3));
1931 if (retval
!= ERROR_OK
)
1932 goto error_unset_dtr_w
;
1934 /* Write the data transfer instruction into the ITR
1935 * (STC p14, c5, [R0], 4)
1937 retval
= mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
,
1938 armv7a
->debug_base
+ CPUDBG_ITR
, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
1939 if (retval
!= ERROR_OK
)
1940 goto error_unset_dtr_w
;
1943 retval
= mem_ap_sel_write_buf_noincr(swjdp
, armv7a
->debug_ap
,
1944 tmp_buff
, 4, total_u32
, armv7a
->debug_base
+ CPUDBG_DTRRX
);
1945 if (retval
!= ERROR_OK
)
1946 goto error_unset_dtr_w
;
1949 /* Switch DTR mode back to non-blocking (0) */
1950 dscr
= (dscr
& ~DSCR_EXT_DCC_MASK
) | DSCR_EXT_DCC_NON_BLOCKING
;
1951 retval
= mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
,
1952 armv7a
->debug_base
+ CPUDBG_DSCR
, dscr
);
1953 if (retval
!= ERROR_OK
)
1954 goto error_unset_dtr_w
;
1956 /* Check for sticky abort flags in the DSCR */
1957 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
1958 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
1959 if (retval
!= ERROR_OK
)
1960 goto error_free_buff_w
;
1961 if (dscr
& (DSCR_STICKY_ABORT_PRECISE
| DSCR_STICKY_ABORT_IMPRECISE
)) {
1962 /* Abort occurred - clear it and exit */
1963 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1964 mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
,
1965 armv7a
->debug_base
+ CPUDBG_DRCR
, 1<<2);
1966 goto error_free_buff_w
;
1974 /* Unset DTR mode */
1975 mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
1976 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
1977 dscr
= (dscr
& ~DSCR_EXT_DCC_MASK
) | DSCR_EXT_DCC_NON_BLOCKING
;
1978 mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
,
1979 armv7a
->debug_base
+ CPUDBG_DSCR
, dscr
);
1986 static int cortex_a_read_apb_ab_memory(struct target
*target
,
1987 uint32_t address
, uint32_t size
,
1988 uint32_t count
, uint8_t *buffer
)
1990 /* read memory through APB-AP */
1992 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1993 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1994 struct adiv5_dap
*swjdp
= armv7a
->arm
.dap
;
1995 struct arm
*arm
= &armv7a
->arm
;
1996 int total_bytes
= count
* size
;
1998 int start_byte
= address
& 0x3;
1999 int end_byte
= (address
+ total_bytes
) & 0x3;
2002 uint8_t *tmp_buff
= NULL
;
2006 LOG_DEBUG("Reading APB-AP memory address 0x%" PRIx32
" size %" PRIu32
" count%" PRIu32
,
2007 address
, size
, count
);
2008 if (target
->state
!= TARGET_HALTED
) {
2009 LOG_WARNING("target not halted");
2010 return ERROR_TARGET_NOT_HALTED
;
2013 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
2014 /* Mark register R0 as dirty, as it will be used
2015 * for transferring the data.
2016 * It will be restored automatically when exiting
2019 reg
= arm_reg_current(arm
, 0);
2022 /* clear any abort */
2024 mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
, armv7a
->debug_base
+ CPUDBG_DRCR
, 1<<2);
2025 if (retval
!= ERROR_OK
)
2026 goto error_free_buff_r
;
2029 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
2030 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
2032 /* This algorithm comes from either :
2033 * Cortex-A TRM Example 12-24
2034 * Cortex-R4 TRM Example 11-25
2035 * (slight differences)
2038 /* Set DTR access mode to stall mode b01 */
2039 dscr
= (dscr
& ~DSCR_EXT_DCC_MASK
) | DSCR_EXT_DCC_STALL_MODE
;
2040 retval
+= mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
,
2041 armv7a
->debug_base
+ CPUDBG_DSCR
, dscr
);
2043 /* Write R0 with value 'address' using write procedure for stall mode */
2044 /* - Write the address for read access into DTRRX */
2045 retval
+= mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
,
2046 armv7a
->debug_base
+ CPUDBG_DTRRX
, address
& ~0x3);
2047 /* - Copy value from DTRRX to R0 using instruction mrc p14, 0, r0, c5, c0 */
2048 cortex_a_exec_opcode(target
, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr
);
2050 /* Write the data transfer instruction (ldc p14, c5, [r0],4)
2051 * and the DTR mode setting to fast mode
2052 * in one combined write (since they are adjacent registers)
2055 target_buffer_set_u32(target
, u8buf_ptr
, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
2056 dscr
= (dscr
& ~DSCR_EXT_DCC_MASK
) | DSCR_EXT_DCC_FAST_MODE
;
2057 target_buffer_set_u32(target
, u8buf_ptr
+ 4, dscr
);
2058 /* group the 2 access CPUDBG_ITR 0x84 and CPUDBG_DSCR 0x88 */
2059 retval
+= mem_ap_sel_write_buf(swjdp
, armv7a
->debug_ap
, u8buf_ptr
, 4, 2,
2060 armv7a
->debug_base
+ CPUDBG_ITR
);
2061 if (retval
!= ERROR_OK
)
2062 goto error_unset_dtr_r
;
2064 /* Optimize the read as much as we can, either way we read in a single pass */
2065 if ((start_byte
) || (end_byte
)) {
2066 /* The algorithm only copies 32 bit words, so the buffer
2067 * should be expanded to include the words at either end.
2068 * The first and last words will be read into a temp buffer
2069 * to avoid corruption
2071 tmp_buff
= malloc(total_u32
* 4);
2073 goto error_unset_dtr_r
;
2075 /* use the tmp buffer to read the entire data */
2076 u8buf_ptr
= tmp_buff
;
2078 /* address and read length are aligned so read directely into the passed buffer */
2081 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
2082 * Abort flags are sticky, so can be read at end of transactions
2084 * This data is read in aligned to 32 bit boundary.
2086 retval
= mem_ap_sel_read_buf_noincr(swjdp
, armv7a
->debug_ap
, u8buf_ptr
, 4, total_u32
,
2087 armv7a
->debug_base
+ CPUDBG_DTRTX
);
2088 if (retval
!= ERROR_OK
)
2089 goto error_unset_dtr_r
;
2091 /* set DTR access mode back to non blocking b00 */
2092 dscr
= (dscr
& ~DSCR_EXT_DCC_MASK
) | DSCR_EXT_DCC_NON_BLOCKING
;
2093 retval
= mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
,
2094 armv7a
->debug_base
+ CPUDBG_DSCR
, dscr
);
2095 if (retval
!= ERROR_OK
)
2096 goto error_free_buff_r
;
2098 /* Wait for the final read instruction to finish */
2100 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
2101 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
2102 if (retval
!= ERROR_OK
)
2103 goto error_free_buff_r
;
2104 } while ((dscr
& DSCR_INSTR_COMP
) == 0);
2106 /* Check for sticky abort flags in the DSCR */
2107 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
2108 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
2109 if (retval
!= ERROR_OK
)
2110 goto error_free_buff_r
;
2111 if (dscr
& (DSCR_STICKY_ABORT_PRECISE
| DSCR_STICKY_ABORT_IMPRECISE
)) {
2112 /* Abort occurred - clear it and exit */
2113 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
2114 mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
,
2115 armv7a
->debug_base
+ CPUDBG_DRCR
, 1<<2);
2116 goto error_free_buff_r
;
2119 /* check if we need to copy aligned data by applying any shift necessary */
2121 memcpy(buffer
, tmp_buff
+ start_byte
, total_bytes
);
2129 /* Unset DTR mode */
2130 mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
2131 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
2132 dscr
= (dscr
& ~DSCR_EXT_DCC_MASK
) | DSCR_EXT_DCC_NON_BLOCKING
;
2133 mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
,
2134 armv7a
->debug_base
+ CPUDBG_DSCR
, dscr
);
2143 * Cortex-A Memory access
2145 * This is same Cortex M3 but we must also use the correct
2146 * ap number for every access.
2149 static int cortex_a_read_phys_memory(struct target
*target
,
2150 uint32_t address
, uint32_t size
,
2151 uint32_t count
, uint8_t *buffer
)
2153 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
2154 struct adiv5_dap
*swjdp
= armv7a
->arm
.dap
;
2155 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2156 uint8_t apsel
= swjdp
->apsel
;
2157 LOG_DEBUG("Reading memory at real address 0x%" PRIx32
"; size %" PRId32
"; count %" PRId32
,
2158 address
, size
, count
);
2160 if (count
&& buffer
) {
2162 if (armv7a
->memory_ap_available
&& (apsel
== armv7a
->memory_ap
)) {
2164 /* read memory through AHB-AP */
2165 retval
= mem_ap_sel_read_buf(swjdp
, armv7a
->memory_ap
, buffer
, size
, count
, address
);
2168 /* read memory through APB-AP */
2169 if (!armv7a
->is_armv7r
) {
2171 retval
= cortex_a_mmu_modify(target
, 0);
2172 if (retval
!= ERROR_OK
)
2175 retval
= cortex_a_read_apb_ab_memory(target
, address
, size
, count
, buffer
);
2181 static int cortex_a_read_memory(struct target
*target
, uint32_t address
,
2182 uint32_t size
, uint32_t count
, uint8_t *buffer
)
2184 int mmu_enabled
= 0;
2185 uint32_t virt
, phys
;
2187 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
2188 struct adiv5_dap
*swjdp
= armv7a
->arm
.dap
;
2189 uint8_t apsel
= swjdp
->apsel
;
2191 /* cortex_a handles unaligned memory access */
2192 LOG_DEBUG("Reading memory at address 0x%" PRIx32
"; size %" PRId32
"; count %" PRId32
, address
,
2195 /* determine if MMU was enabled on target stop */
2196 if (!armv7a
->is_armv7r
) {
2197 retval
= cortex_a_mmu(target
, &mmu_enabled
);
2198 if (retval
!= ERROR_OK
)
2202 if (armv7a
->memory_ap_available
&& (apsel
== armv7a
->memory_ap
)) {
2205 retval
= cortex_a_virt2phys(target
, virt
, &phys
);
2206 if (retval
!= ERROR_OK
)
2209 LOG_DEBUG("Reading at virtual address. Translating v:0x%" PRIx32
" to r:0x%" PRIx32
,
2213 retval
= cortex_a_read_phys_memory(target
, address
, size
,
2217 retval
= cortex_a_check_address(target
, address
);
2218 if (retval
!= ERROR_OK
)
2220 /* enable MMU as we could have disabled it for phys access */
2221 retval
= cortex_a_mmu_modify(target
, 1);
2222 if (retval
!= ERROR_OK
)
2225 retval
= cortex_a_read_apb_ab_memory(target
, address
, size
, count
, buffer
);
2230 static int cortex_a_write_phys_memory(struct target
*target
,
2231 uint32_t address
, uint32_t size
,
2232 uint32_t count
, const uint8_t *buffer
)
2234 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
2235 struct adiv5_dap
*swjdp
= armv7a
->arm
.dap
;
2236 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2237 uint8_t apsel
= swjdp
->apsel
;
2239 LOG_DEBUG("Writing memory to real address 0x%" PRIx32
"; size %" PRId32
"; count %" PRId32
, address
,
2242 if (count
&& buffer
) {
2244 if (armv7a
->memory_ap_available
&& (apsel
== armv7a
->memory_ap
)) {
2246 /* write memory through AHB-AP */
2247 retval
= mem_ap_sel_write_buf(swjdp
, armv7a
->memory_ap
, buffer
, size
, count
, address
);
2250 /* write memory through APB-AP */
2251 if (!armv7a
->is_armv7r
) {
2252 retval
= cortex_a_mmu_modify(target
, 0);
2253 if (retval
!= ERROR_OK
)
2256 return cortex_a_write_apb_ab_memory(target
, address
, size
, count
, buffer
);
2261 /* REVISIT this op is generic ARMv7-A/R stuff */
2262 if (retval
== ERROR_OK
&& target
->state
== TARGET_HALTED
) {
2263 struct arm_dpm
*dpm
= armv7a
->arm
.dpm
;
2265 retval
= dpm
->prepare(dpm
);
2266 if (retval
!= ERROR_OK
)
2269 /* The Cache handling will NOT work with MMU active, the
2270 * wrong addresses will be invalidated!
2272 * For both ICache and DCache, walk all cache lines in the
2273 * address range. Cortex-A has fixed 64 byte line length.
2275 * REVISIT per ARMv7, these may trigger watchpoints ...
2278 /* invalidate I-Cache */
2279 if (armv7a
->armv7a_mmu
.armv7a_cache
.i_cache_enabled
) {
2280 /* ICIMVAU - Invalidate Cache single entry
2282 * MCR p15, 0, r0, c7, c5, 1
2284 for (uint32_t cacheline
= 0;
2285 cacheline
< size
* count
;
2287 retval
= dpm
->instr_write_data_r0(dpm
,
2288 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2289 address
+ cacheline
);
2290 if (retval
!= ERROR_OK
)
2295 /* invalidate D-Cache */
2296 if (armv7a
->armv7a_mmu
.armv7a_cache
.d_u_cache_enabled
) {
2297 /* DCIMVAC - Invalidate data Cache line
2299 * MCR p15, 0, r0, c7, c6, 1
2301 for (uint32_t cacheline
= 0;
2302 cacheline
< size
* count
;
2304 retval
= dpm
->instr_write_data_r0(dpm
,
2305 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2306 address
+ cacheline
);
2307 if (retval
!= ERROR_OK
)
2312 /* (void) */ dpm
->finish(dpm
);
2318 static int cortex_a_write_memory(struct target
*target
, uint32_t address
,
2319 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
2321 int mmu_enabled
= 0;
2322 uint32_t virt
, phys
;
2324 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
2325 struct adiv5_dap
*swjdp
= armv7a
->arm
.dap
;
2326 uint8_t apsel
= swjdp
->apsel
;
2328 /* cortex_a handles unaligned memory access */
2329 LOG_DEBUG("Writing memory at address 0x%" PRIx32
"; size %" PRId32
"; count %" PRId32
, address
,
2332 /* determine if MMU was enabled on target stop */
2333 if (!armv7a
->is_armv7r
) {
2334 retval
= cortex_a_mmu(target
, &mmu_enabled
);
2335 if (retval
!= ERROR_OK
)
2339 if (armv7a
->memory_ap_available
&& (apsel
== armv7a
->memory_ap
)) {
2340 LOG_DEBUG("Writing memory to address 0x%" PRIx32
"; size %" PRId32
"; count %" PRId32
, address
, size
,
2344 retval
= cortex_a_virt2phys(target
, virt
, &phys
);
2345 if (retval
!= ERROR_OK
)
2348 LOG_DEBUG("Writing to virtual address. Translating v:0x%" PRIx32
" to r:0x%" PRIx32
,
2353 retval
= cortex_a_write_phys_memory(target
, address
, size
,
2357 retval
= cortex_a_check_address(target
, address
);
2358 if (retval
!= ERROR_OK
)
2360 /* enable MMU as we could have disabled it for phys access */
2361 retval
= cortex_a_mmu_modify(target
, 1);
2362 if (retval
!= ERROR_OK
)
2365 retval
= cortex_a_write_apb_ab_memory(target
, address
, size
, count
, buffer
);
2370 static int cortex_a_handle_target_request(void *priv
)
2372 struct target
*target
= priv
;
2373 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
2374 struct adiv5_dap
*swjdp
= armv7a
->arm
.dap
;
2377 if (!target_was_examined(target
))
2379 if (!target
->dbg_msg_enabled
)
2382 if (target
->state
== TARGET_RUNNING
) {
2385 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
2386 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
2388 /* check if we have data */
2389 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
2390 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
2391 armv7a
->debug_base
+ CPUDBG_DTRTX
, &request
);
2392 if (retval
== ERROR_OK
) {
2393 target_request(target
, request
);
2394 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
2395 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
2404 * Cortex-A target information and configuration
2407 static int cortex_a_examine_first(struct target
*target
)
2409 struct cortex_a_common
*cortex_a
= target_to_cortex_a(target
);
2410 struct armv7a_common
*armv7a
= &cortex_a
->armv7a_common
;
2411 struct adiv5_dap
*swjdp
= armv7a
->arm
.dap
;
2413 int retval
= ERROR_OK
;
2414 uint32_t didr
, ctypr
, ttypr
, cpuid
, dbg_osreg
;
2416 /* We do one extra read to ensure DAP is configured,
2417 * we call ahbap_debugport_init(swjdp) instead
2419 retval
= ahbap_debugport_init(swjdp
);
2420 if (retval
!= ERROR_OK
)
2423 /* Search for the APB-AB - it is needed for access to debug registers */
2424 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv7a
->debug_ap
);
2425 if (retval
!= ERROR_OK
) {
2426 LOG_ERROR("Could not find APB-AP for debug access");
2429 /* Search for the AHB-AB */
2430 retval
= dap_find_ap(swjdp
, AP_TYPE_AHB_AP
, &armv7a
->memory_ap
);
2431 if (retval
!= ERROR_OK
) {
2432 /* AHB-AP not found - use APB-AP */
2433 LOG_DEBUG("Could not find AHB-AP - using APB-AP for memory access");
2434 armv7a
->memory_ap_available
= false;
2436 armv7a
->memory_ap_available
= true;
2440 if (!target
->dbgbase_set
) {
2442 /* Get ROM Table base */
2444 int32_t coreidx
= target
->coreid
;
2445 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2447 retval
= dap_get_debugbase(swjdp
, 1, &dbgbase
, &apid
);
2448 if (retval
!= ERROR_OK
)
2450 /* Lookup 0x15 -- Processor DAP */
2451 retval
= dap_lookup_cs_component(swjdp
, 1, dbgbase
, 0x15,
2452 &armv7a
->debug_base
, &coreidx
);
2453 if (retval
!= ERROR_OK
)
2455 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
,
2456 coreidx
, armv7a
->debug_base
);
2458 armv7a
->debug_base
= target
->dbgbase
;
2460 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
2461 armv7a
->debug_base
+ CPUDBG_CPUID
, &cpuid
);
2462 if (retval
!= ERROR_OK
)
2465 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
2466 armv7a
->debug_base
+ CPUDBG_CPUID
, &cpuid
);
2467 if (retval
!= ERROR_OK
) {
2468 LOG_DEBUG("Examine %s failed", "CPUID");
2472 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
2473 armv7a
->debug_base
+ CPUDBG_CTYPR
, &ctypr
);
2474 if (retval
!= ERROR_OK
) {
2475 LOG_DEBUG("Examine %s failed", "CTYPR");
2479 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
2480 armv7a
->debug_base
+ CPUDBG_TTYPR
, &ttypr
);
2481 if (retval
!= ERROR_OK
) {
2482 LOG_DEBUG("Examine %s failed", "TTYPR");
2486 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
2487 armv7a
->debug_base
+ CPUDBG_DIDR
, &didr
);
2488 if (retval
!= ERROR_OK
) {
2489 LOG_DEBUG("Examine %s failed", "DIDR");
2493 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
2494 LOG_DEBUG("ctypr = 0x%08" PRIx32
, ctypr
);
2495 LOG_DEBUG("ttypr = 0x%08" PRIx32
, ttypr
);
2496 LOG_DEBUG("didr = 0x%08" PRIx32
, didr
);
2498 cortex_a
->cpuid
= cpuid
;
2499 cortex_a
->ctypr
= ctypr
;
2500 cortex_a
->ttypr
= ttypr
;
2501 cortex_a
->didr
= didr
;
2503 /* Unlocking the debug registers */
2504 if ((cpuid
& CORTEX_A_MIDR_PARTNUM_MASK
) >> CORTEX_A_MIDR_PARTNUM_SHIFT
==
2505 CORTEX_A15_PARTNUM
) {
2507 retval
= mem_ap_sel_write_atomic_u32(swjdp
, armv7a
->debug_ap
,
2508 armv7a
->debug_base
+ CPUDBG_OSLAR
,
2511 if (retval
!= ERROR_OK
)
2515 retval
= mem_ap_sel_read_atomic_u32(swjdp
, armv7a
->debug_ap
,
2516 armv7a
->debug_base
+ CPUDBG_PRSR
, &dbg_osreg
);
2518 if (retval
!= ERROR_OK
)
2521 LOG_DEBUG("target->coreid %d DBGPRSR 0x%" PRIx32
, target
->coreid
, dbg_osreg
);
2523 armv7a
->arm
.core_type
= ARM_MODE_MON
;
2524 retval
= cortex_a_dpm_setup(cortex_a
, didr
);
2525 if (retval
!= ERROR_OK
)
2528 /* Setup Breakpoint Register Pairs */
2529 cortex_a
->brp_num
= ((didr
>> 24) & 0x0F) + 1;
2530 cortex_a
->brp_num_context
= ((didr
>> 20) & 0x0F) + 1;
2531 cortex_a
->brp_num_available
= cortex_a
->brp_num
;
2532 cortex_a
->brp_list
= calloc(cortex_a
->brp_num
, sizeof(struct cortex_a_brp
));
2533 /* cortex_a->brb_enabled = ????; */
2534 for (i
= 0; i
< cortex_a
->brp_num
; i
++) {
2535 cortex_a
->brp_list
[i
].used
= 0;
2536 if (i
< (cortex_a
->brp_num
-cortex_a
->brp_num_context
))
2537 cortex_a
->brp_list
[i
].type
= BRP_NORMAL
;
2539 cortex_a
->brp_list
[i
].type
= BRP_CONTEXT
;
2540 cortex_a
->brp_list
[i
].value
= 0;
2541 cortex_a
->brp_list
[i
].control
= 0;
2542 cortex_a
->brp_list
[i
].BRPn
= i
;
2545 LOG_DEBUG("Configured %i hw breakpoints", cortex_a
->brp_num
);
2547 target_set_examined(target
);
2551 static int cortex_a_examine(struct target
*target
)
2553 int retval
= ERROR_OK
;
2555 /* don't re-probe hardware after each reset */
2556 if (!target_was_examined(target
))
2557 retval
= cortex_a_examine_first(target
);
2559 /* Configure core debug access */
2560 if (retval
== ERROR_OK
)
2561 retval
= cortex_a_init_debug_access(target
);
2567 * Cortex-A target creation and initialization
2570 static int cortex_a_init_target(struct command_context
*cmd_ctx
,
2571 struct target
*target
)
2573 /* examine_first() does a bunch of this */
2577 static int cortex_a_init_arch_info(struct target
*target
,
2578 struct cortex_a_common
*cortex_a
, struct jtag_tap
*tap
)
2580 struct armv7a_common
*armv7a
= &cortex_a
->armv7a_common
;
2581 struct adiv5_dap
*dap
= &armv7a
->dap
;
2583 armv7a
->arm
.dap
= dap
;
2585 /* Setup struct cortex_a_common */
2586 cortex_a
->common_magic
= CORTEX_A_COMMON_MAGIC
;
2587 /* tap has no dap initialized */
2589 armv7a
->arm
.dap
= dap
;
2590 /* Setup struct cortex_a_common */
2592 /* prepare JTAG information for the new target */
2593 cortex_a
->jtag_info
.tap
= tap
;
2594 cortex_a
->jtag_info
.scann_size
= 4;
2596 /* Leave (only) generic DAP stuff for debugport_init() */
2597 dap
->jtag_info
= &cortex_a
->jtag_info
;
2599 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
2600 dap
->tar_autoincr_block
= (1 << 10);
2601 dap
->memaccess_tck
= 80;
2604 armv7a
->arm
.dap
= tap
->dap
;
2606 cortex_a
->fast_reg_read
= 0;
2608 /* register arch-specific functions */
2609 armv7a
->examine_debug_reason
= NULL
;
2611 armv7a
->post_debug_entry
= cortex_a_post_debug_entry
;
2613 armv7a
->pre_restore_context
= NULL
;
2615 armv7a
->armv7a_mmu
.read_physical_memory
= cortex_a_read_phys_memory
;
2618 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
2620 /* REVISIT v7a setup should be in a v7a-specific routine */
2621 armv7a_init_arch_info(target
, armv7a
);
2622 target_register_timer_callback(cortex_a_handle_target_request
, 1, 1, target
);
2627 static int cortex_a_target_create(struct target
*target
, Jim_Interp
*interp
)
2629 struct cortex_a_common
*cortex_a
= calloc(1, sizeof(struct cortex_a_common
));
2631 cortex_a
->armv7a_common
.is_armv7r
= false;
2633 return cortex_a_init_arch_info(target
, cortex_a
, target
->tap
);
2636 static int cortex_r4_target_create(struct target
*target
, Jim_Interp
*interp
)
2638 struct cortex_a_common
*cortex_a
= calloc(1, sizeof(struct cortex_a_common
));
2640 cortex_a
->armv7a_common
.is_armv7r
= true;
2642 return cortex_a_init_arch_info(target
, cortex_a
, target
->tap
);
2646 static int cortex_a_mmu(struct target
*target
, int *enabled
)
2648 if (target
->state
!= TARGET_HALTED
) {
2649 LOG_ERROR("%s: target not halted", __func__
);
2650 return ERROR_TARGET_INVALID
;
2653 *enabled
= target_to_cortex_a(target
)->armv7a_common
.armv7a_mmu
.mmu_enabled
;
2657 static int cortex_a_virt2phys(struct target
*target
,
2658 uint32_t virt
, uint32_t *phys
)
2660 int retval
= ERROR_FAIL
;
2661 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
2662 struct adiv5_dap
*swjdp
= armv7a
->arm
.dap
;
2663 uint8_t apsel
= swjdp
->apsel
;
2664 if (armv7a
->memory_ap_available
&& (apsel
== armv7a
->memory_ap
)) {
2666 retval
= armv7a_mmu_translate_va(target
,
2668 if (retval
!= ERROR_OK
)
2671 } else {/* use this method if armv7a->memory_ap not selected
2672 * mmu must be enable in order to get a correct translation */
2673 retval
= cortex_a_mmu_modify(target
, 1);
2674 if (retval
!= ERROR_OK
)
2676 retval
= armv7a_mmu_translate_va_pa(target
, virt
, phys
, 1);
2682 COMMAND_HANDLER(cortex_a_handle_cache_info_command
)
2684 struct target
*target
= get_current_target(CMD_CTX
);
2685 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
2687 return armv7a_handle_cache_info_command(CMD_CTX
,
2688 &armv7a
->armv7a_mmu
.armv7a_cache
);
2692 COMMAND_HANDLER(cortex_a_handle_dbginit_command
)
2694 struct target
*target
= get_current_target(CMD_CTX
);
2695 if (!target_was_examined(target
)) {
2696 LOG_ERROR("target not examined yet");
2700 return cortex_a_init_debug_access(target
);
2702 COMMAND_HANDLER(cortex_a_handle_smp_off_command
)
2704 struct target
*target
= get_current_target(CMD_CTX
);
2705 /* check target is an smp target */
2706 struct target_list
*head
;
2707 struct target
*curr
;
2708 head
= target
->head
;
2710 if (head
!= (struct target_list
*)NULL
) {
2711 while (head
!= (struct target_list
*)NULL
) {
2712 curr
= head
->target
;
2716 /* fixes the target display to the debugger */
2717 target
->gdb_service
->target
= target
;
2722 COMMAND_HANDLER(cortex_a_handle_smp_on_command
)
2724 struct target
*target
= get_current_target(CMD_CTX
);
2725 struct target_list
*head
;
2726 struct target
*curr
;
2727 head
= target
->head
;
2728 if (head
!= (struct target_list
*)NULL
) {
2730 while (head
!= (struct target_list
*)NULL
) {
2731 curr
= head
->target
;
2739 COMMAND_HANDLER(cortex_a_handle_smp_gdb_command
)
2741 struct target
*target
= get_current_target(CMD_CTX
);
2742 int retval
= ERROR_OK
;
2743 struct target_list
*head
;
2744 head
= target
->head
;
2745 if (head
!= (struct target_list
*)NULL
) {
2746 if (CMD_ARGC
== 1) {
2748 COMMAND_PARSE_NUMBER(int, CMD_ARGV
[0], coreid
);
2749 if (ERROR_OK
!= retval
)
2751 target
->gdb_service
->core
[1] = coreid
;
2754 command_print(CMD_CTX
, "gdb coreid %" PRId32
" -> %" PRId32
, target
->gdb_service
->core
[0]
2755 , target
->gdb_service
->core
[1]);
2760 static const struct command_registration cortex_a_exec_command_handlers
[] = {
2762 .name
= "cache_info",
2763 .handler
= cortex_a_handle_cache_info_command
,
2764 .mode
= COMMAND_EXEC
,
2765 .help
= "display information about target caches",
2770 .handler
= cortex_a_handle_dbginit_command
,
2771 .mode
= COMMAND_EXEC
,
2772 .help
= "Initialize core debug",
2775 { .name
= "smp_off",
2776 .handler
= cortex_a_handle_smp_off_command
,
2777 .mode
= COMMAND_EXEC
,
2778 .help
= "Stop smp handling",
2782 .handler
= cortex_a_handle_smp_on_command
,
2783 .mode
= COMMAND_EXEC
,
2784 .help
= "Restart smp handling",
2789 .handler
= cortex_a_handle_smp_gdb_command
,
2790 .mode
= COMMAND_EXEC
,
2791 .help
= "display/fix current core played to gdb",
2796 COMMAND_REGISTRATION_DONE
2798 static const struct command_registration cortex_a_command_handlers
[] = {
2800 .chain
= arm_command_handlers
,
2803 .chain
= armv7a_command_handlers
,
2807 .mode
= COMMAND_ANY
,
2808 .help
= "Cortex-A command group",
2810 .chain
= cortex_a_exec_command_handlers
,
2812 COMMAND_REGISTRATION_DONE
2815 struct target_type cortexa_target
= {
2817 .deprecated_name
= "cortex_a8",
2819 .poll
= cortex_a_poll
,
2820 .arch_state
= armv7a_arch_state
,
2822 .halt
= cortex_a_halt
,
2823 .resume
= cortex_a_resume
,
2824 .step
= cortex_a_step
,
2826 .assert_reset
= cortex_a_assert_reset
,
2827 .deassert_reset
= cortex_a_deassert_reset
,
2829 /* REVISIT allow exporting VFP3 registers ... */
2830 .get_gdb_reg_list
= arm_get_gdb_reg_list
,
2832 .read_memory
= cortex_a_read_memory
,
2833 .write_memory
= cortex_a_write_memory
,
2835 .checksum_memory
= arm_checksum_memory
,
2836 .blank_check_memory
= arm_blank_check_memory
,
2838 .run_algorithm
= armv4_5_run_algorithm
,
2840 .add_breakpoint
= cortex_a_add_breakpoint
,
2841 .add_context_breakpoint
= cortex_a_add_context_breakpoint
,
2842 .add_hybrid_breakpoint
= cortex_a_add_hybrid_breakpoint
,
2843 .remove_breakpoint
= cortex_a_remove_breakpoint
,
2844 .add_watchpoint
= NULL
,
2845 .remove_watchpoint
= NULL
,
2847 .commands
= cortex_a_command_handlers
,
2848 .target_create
= cortex_a_target_create
,
2849 .init_target
= cortex_a_init_target
,
2850 .examine
= cortex_a_examine
,
2852 .read_phys_memory
= cortex_a_read_phys_memory
,
2853 .write_phys_memory
= cortex_a_write_phys_memory
,
2854 .mmu
= cortex_a_mmu
,
2855 .virt2phys
= cortex_a_virt2phys
,
2858 static const struct command_registration cortex_r4_exec_command_handlers
[] = {
2860 .name
= "cache_info",
2861 .handler
= cortex_a_handle_cache_info_command
,
2862 .mode
= COMMAND_EXEC
,
2863 .help
= "display information about target caches",
2868 .handler
= cortex_a_handle_dbginit_command
,
2869 .mode
= COMMAND_EXEC
,
2870 .help
= "Initialize core debug",
2874 COMMAND_REGISTRATION_DONE
2876 static const struct command_registration cortex_r4_command_handlers
[] = {
2878 .chain
= arm_command_handlers
,
2881 .chain
= armv7a_command_handlers
,
2884 .name
= "cortex_r4",
2885 .mode
= COMMAND_ANY
,
2886 .help
= "Cortex-R4 command group",
2888 .chain
= cortex_r4_exec_command_handlers
,
2890 COMMAND_REGISTRATION_DONE
2893 struct target_type cortexr4_target
= {
2894 .name
= "cortex_r4",
2896 .poll
= cortex_a_poll
,
2897 .arch_state
= armv7a_arch_state
,
2899 .halt
= cortex_a_halt
,
2900 .resume
= cortex_a_resume
,
2901 .step
= cortex_a_step
,
2903 .assert_reset
= cortex_a_assert_reset
,
2904 .deassert_reset
= cortex_a_deassert_reset
,
2906 /* REVISIT allow exporting VFP3 registers ... */
2907 .get_gdb_reg_list
= arm_get_gdb_reg_list
,
2909 .read_memory
= cortex_a_read_memory
,
2910 .write_memory
= cortex_a_write_memory
,
2912 .checksum_memory
= arm_checksum_memory
,
2913 .blank_check_memory
= arm_blank_check_memory
,
2915 .run_algorithm
= armv4_5_run_algorithm
,
2917 .add_breakpoint
= cortex_a_add_breakpoint
,
2918 .add_context_breakpoint
= cortex_a_add_context_breakpoint
,
2919 .add_hybrid_breakpoint
= cortex_a_add_hybrid_breakpoint
,
2920 .remove_breakpoint
= cortex_a_remove_breakpoint
,
2921 .add_watchpoint
= NULL
,
2922 .remove_watchpoint
= NULL
,
2924 .commands
= cortex_r4_command_handlers
,
2925 .target_create
= cortex_r4_target_create
,
2926 .init_target
= cortex_a_init_target
,
2927 .examine
= cortex_a_examine
,