2 * Copyright (C) 2009 by David Brownell
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
22 #include "armv8_dpm.h"
23 #include <jtag/jtag.h>
25 #include "breakpoints.h"
26 #include "target_type.h"
27 #include "armv8_opcodes.h"
29 #include "helper/time_support.h"
32 #define T32_FMTITR(instr) (((instr & 0x0000FFFF) << 16) | ((instr & 0xFFFF0000) >> 16))
36 * Implements various ARM DPM operations using architectural debug registers.
37 * These routines layer over core-specific communication methods to cope with
38 * implementation differences between cores like ARM1136 and Cortex-A8.
40 * The "Debug Programmers' Model" (DPM) for ARMv6 and ARMv7 is defined by
41 * Part C (Debug Architecture) of the ARM Architecture Reference Manual,
42 * ARMv7-A and ARMv7-R edition (ARM DDI 0406B). In OpenOCD, DPM operations
43 * are abstracted through internal programming interfaces to share code and
44 * to minimize needless differences in debug behavior between cores.
48 * Get core state from EDSCR, without necessity to retrieve CPSR
50 enum arm_state
armv8_dpm_get_core_state(struct arm_dpm
*dpm
)
52 int el
= (dpm
->dscr
>> 8) & 0x3;
53 int rw
= (dpm
->dscr
>> 10) & 0xF;
58 /* find the first '0' in DSCR.RW */
59 for (pos
= 3; pos
>= 0; pos
--) {
60 if ((rw
& (1 << pos
)) == 0)
65 return ARM_STATE_AARCH64
;
70 /*----------------------------------------------------------------------*/
72 static int dpmv8_write_dcc(struct armv8_common
*armv8
, uint32_t data
)
74 return mem_ap_write_u32(armv8
->debug_ap
,
75 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
78 static int dpmv8_write_dcc_64(struct armv8_common
*armv8
, uint64_t data
)
81 ret
= mem_ap_write_u32(armv8
->debug_ap
,
82 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
84 ret
= mem_ap_write_u32(armv8
->debug_ap
,
85 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, data
>> 32);
89 static int dpmv8_read_dcc(struct armv8_common
*armv8
, uint32_t *data
,
92 uint32_t dscr
= DSCR_ITE
;
98 /* Wait for DTRRXfull */
99 long long then
= timeval_ms();
100 while ((dscr
& DSCR_DTR_TX_FULL
) == 0) {
101 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
102 armv8
->debug_base
+ CPUV8_DBG_DSCR
,
104 if (retval
!= ERROR_OK
)
106 if (timeval_ms() > then
+ 1000) {
107 LOG_ERROR("Timeout waiting for read dcc");
112 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
113 armv8
->debug_base
+ CPUV8_DBG_DTRTX
,
115 if (retval
!= ERROR_OK
)
124 static int dpmv8_read_dcc_64(struct armv8_common
*armv8
, uint64_t *data
,
127 uint32_t dscr
= DSCR_ITE
;
134 /* Wait for DTRRXfull */
135 long long then
= timeval_ms();
136 while ((dscr
& DSCR_DTR_TX_FULL
) == 0) {
137 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
138 armv8
->debug_base
+ CPUV8_DBG_DSCR
,
140 if (retval
!= ERROR_OK
)
142 if (timeval_ms() > then
+ 1000) {
143 LOG_ERROR("Timeout waiting for DTR_TX_FULL, dscr = 0x%08" PRIx32
, dscr
);
148 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
149 armv8
->debug_base
+ CPUV8_DBG_DTRTX
,
151 if (retval
!= ERROR_OK
)
154 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
155 armv8
->debug_base
+ CPUV8_DBG_DTRRX
,
157 if (retval
!= ERROR_OK
)
160 *data
= *(uint32_t *)data
| (uint64_t)higher
<< 32;
168 static int dpmv8_dpm_prepare(struct arm_dpm
*dpm
)
170 struct armv8_common
*armv8
= dpm
->arm
->arch_info
;
174 /* set up invariant: ITE is set after ever DPM operation */
175 long long then
= timeval_ms();
177 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
178 armv8
->debug_base
+ CPUV8_DBG_DSCR
,
180 if (retval
!= ERROR_OK
)
182 if ((dscr
& DSCR_ITE
) != 0)
184 if (timeval_ms() > then
+ 1000) {
185 LOG_ERROR("Timeout waiting for dpm prepare");
190 /* update the stored copy of dscr */
193 /* this "should never happen" ... */
194 if (dscr
& DSCR_DTR_RX_FULL
) {
195 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32
, dscr
);
197 retval
= mem_ap_read_u32(armv8
->debug_ap
,
198 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, &dscr
);
199 if (retval
!= ERROR_OK
)
206 static int dpmv8_dpm_finish(struct arm_dpm
*dpm
)
208 /* REVISIT what could be done here? */
212 static int dpmv8_exec_opcode(struct arm_dpm
*dpm
,
213 uint32_t opcode
, uint32_t *p_dscr
)
215 struct armv8_common
*armv8
= dpm
->arm
->arch_info
;
216 uint32_t dscr
= dpm
->dscr
;
222 /* Wait for InstrCompl bit to be set */
223 long long then
= timeval_ms();
224 while ((dscr
& DSCR_ITE
) == 0) {
225 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
226 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
227 if (retval
!= ERROR_OK
) {
228 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32
, opcode
);
231 if (timeval_ms() > then
+ 1000) {
232 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
237 if (armv8_dpm_get_core_state(dpm
) != ARM_STATE_AARCH64
)
238 opcode
= T32_FMTITR(opcode
);
240 retval
= mem_ap_write_u32(armv8
->debug_ap
,
241 armv8
->debug_base
+ CPUV8_DBG_ITR
, opcode
);
242 if (retval
!= ERROR_OK
)
247 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
248 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
249 if (retval
!= ERROR_OK
) {
250 LOG_ERROR("Could not read DSCR register");
253 if (timeval_ms() > then
+ 1000) {
254 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
257 } while ((dscr
& DSCR_ITE
) == 0); /* Wait for InstrCompl bit to be set */
259 /* update dscr and el after each command execution */
261 if (dpm
->last_el
!= ((dscr
>> 8) & 3))
262 LOG_DEBUG("EL %i -> %i", dpm
->last_el
, (dscr
>> 8) & 3);
263 dpm
->last_el
= (dscr
>> 8) & 3;
265 if (dscr
& DSCR_ERR
) {
266 LOG_ERROR("Opcode 0x%08"PRIx32
", DSCR.ERR=1, DSCR.EL=%i", opcode
, dpm
->last_el
);
267 armv8_dpm_handle_exception(dpm
);
277 static int dpmv8_instr_execute(struct arm_dpm
*dpm
, uint32_t opcode
)
279 return dpmv8_exec_opcode(dpm
, opcode
, NULL
);
282 static int dpmv8_instr_write_data_dcc(struct arm_dpm
*dpm
,
283 uint32_t opcode
, uint32_t data
)
285 struct armv8_common
*armv8
= dpm
->arm
->arch_info
;
288 retval
= dpmv8_write_dcc(armv8
, data
);
289 if (retval
!= ERROR_OK
)
292 return dpmv8_exec_opcode(dpm
, opcode
, 0);
295 static int dpmv8_instr_write_data_dcc_64(struct arm_dpm
*dpm
,
296 uint32_t opcode
, uint64_t data
)
298 struct armv8_common
*armv8
= dpm
->arm
->arch_info
;
301 retval
= dpmv8_write_dcc_64(armv8
, data
);
302 if (retval
!= ERROR_OK
)
305 return dpmv8_exec_opcode(dpm
, opcode
, 0);
308 static int dpmv8_instr_write_data_r0(struct arm_dpm
*dpm
,
309 uint32_t opcode
, uint32_t data
)
311 struct armv8_common
*armv8
= dpm
->arm
->arch_info
;
312 uint32_t dscr
= DSCR_ITE
;
315 retval
= dpmv8_write_dcc(armv8
, data
);
316 if (retval
!= ERROR_OK
)
319 retval
= dpmv8_exec_opcode(dpm
, armv8_opcode(armv8
, READ_REG_DTRRX
), &dscr
);
320 if (retval
!= ERROR_OK
)
323 /* then the opcode, taking data from R0 */
324 return dpmv8_exec_opcode(dpm
, opcode
, &dscr
);
327 static int dpmv8_instr_write_data_r0_64(struct arm_dpm
*dpm
,
328 uint32_t opcode
, uint64_t data
)
330 struct armv8_common
*armv8
= dpm
->arm
->arch_info
;
333 if (dpm
->arm
->core_state
!= ARM_STATE_AARCH64
)
334 return dpmv8_instr_write_data_r0(dpm
, opcode
, data
);
336 /* transfer data from DCC to R0 */
337 retval
= dpmv8_write_dcc_64(armv8
, data
);
338 if (retval
== ERROR_OK
)
339 retval
= dpmv8_exec_opcode(dpm
, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), &dpm
->dscr
);
341 /* then the opcode, taking data from R0 */
342 if (retval
== ERROR_OK
)
343 retval
= dpmv8_exec_opcode(dpm
, opcode
, &dpm
->dscr
);
348 static int dpmv8_instr_cpsr_sync(struct arm_dpm
*dpm
)
351 struct armv8_common
*armv8
= dpm
->arm
->arch_info
;
353 /* "Prefetch flush" after modifying execution status in CPSR */
354 retval
= dpmv8_exec_opcode(dpm
, armv8_opcode(armv8
, ARMV8_OPC_DSB_SY
), &dpm
->dscr
);
355 if (retval
== ERROR_OK
)
356 dpmv8_exec_opcode(dpm
, armv8_opcode(armv8
, ARMV8_OPC_ISB_SY
), &dpm
->dscr
);
360 static int dpmv8_instr_read_data_dcc(struct arm_dpm
*dpm
,
361 uint32_t opcode
, uint32_t *data
)
363 struct armv8_common
*armv8
= dpm
->arm
->arch_info
;
366 /* the opcode, writing data to DCC */
367 retval
= dpmv8_exec_opcode(dpm
, opcode
, &dpm
->dscr
);
368 if (retval
!= ERROR_OK
)
371 return dpmv8_read_dcc(armv8
, data
, &dpm
->dscr
);
374 static int dpmv8_instr_read_data_dcc_64(struct arm_dpm
*dpm
,
375 uint32_t opcode
, uint64_t *data
)
377 struct armv8_common
*armv8
= dpm
->arm
->arch_info
;
380 /* the opcode, writing data to DCC */
381 retval
= dpmv8_exec_opcode(dpm
, opcode
, &dpm
->dscr
);
382 if (retval
!= ERROR_OK
)
385 return dpmv8_read_dcc_64(armv8
, data
, &dpm
->dscr
);
388 static int dpmv8_instr_read_data_r0(struct arm_dpm
*dpm
,
389 uint32_t opcode
, uint32_t *data
)
391 struct armv8_common
*armv8
= dpm
->arm
->arch_info
;
394 /* the opcode, writing data to R0 */
395 retval
= dpmv8_exec_opcode(dpm
, opcode
, &dpm
->dscr
);
396 if (retval
!= ERROR_OK
)
399 /* write R0 to DCC */
400 retval
= dpmv8_exec_opcode(dpm
, armv8_opcode(armv8
, WRITE_REG_DTRTX
), &dpm
->dscr
);
401 if (retval
!= ERROR_OK
)
404 return dpmv8_read_dcc(armv8
, data
, &dpm
->dscr
);
407 static int dpmv8_instr_read_data_r0_64(struct arm_dpm
*dpm
,
408 uint32_t opcode
, uint64_t *data
)
410 struct armv8_common
*armv8
= dpm
->arm
->arch_info
;
413 if (dpm
->arm
->core_state
!= ARM_STATE_AARCH64
) {
415 retval
= dpmv8_instr_read_data_r0(dpm
, opcode
, &tmp
);
416 if (retval
== ERROR_OK
)
421 /* the opcode, writing data to R0 */
422 retval
= dpmv8_exec_opcode(dpm
, opcode
, &dpm
->dscr
);
423 if (retval
!= ERROR_OK
)
426 /* write R0 to DCC */
427 retval
= dpmv8_exec_opcode(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0), &dpm
->dscr
);
428 if (retval
!= ERROR_OK
)
431 return dpmv8_read_dcc_64(armv8
, data
, &dpm
->dscr
);
435 static int dpmv8_bpwp_enable(struct arm_dpm
*dpm
, unsigned index_t
,
436 target_addr_t addr
, uint32_t control
)
438 struct armv8_common
*armv8
= dpm
->arm
->arch_info
;
439 uint32_t vr
= armv8
->debug_base
;
440 uint32_t cr
= armv8
->debug_base
;
444 case 0 ... 15: /* breakpoints */
445 vr
+= CPUV8_DBG_BVR_BASE
;
446 cr
+= CPUV8_DBG_BCR_BASE
;
448 case 16 ... 31: /* watchpoints */
449 vr
+= CPUV8_DBG_WVR_BASE
;
450 cr
+= CPUV8_DBG_WCR_BASE
;
459 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
460 (unsigned) vr
, (unsigned) cr
);
462 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, vr
, addr
);
463 if (retval
!= ERROR_OK
)
465 return mem_ap_write_atomic_u32(armv8
->debug_ap
, cr
, control
);
469 static int dpmv8_bpwp_disable(struct arm_dpm
*dpm
, unsigned index_t
)
471 struct armv8_common
*armv8
= dpm
->arm
->arch_info
;
476 cr
= armv8
->debug_base
+ CPUV8_DBG_BCR_BASE
;
479 cr
= armv8
->debug_base
+ CPUV8_DBG_WCR_BASE
;
487 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr
);
489 /* clear control register */
490 return mem_ap_write_atomic_u32(armv8
->debug_ap
, cr
, 0);
494 * Coprocessor support
497 /* Read coprocessor */
498 static int dpmv8_mrc(struct target
*target
, int cpnum
,
499 uint32_t op1
, uint32_t op2
, uint32_t CRn
, uint32_t CRm
,
502 struct arm
*arm
= target_to_arm(target
);
503 struct arm_dpm
*dpm
= arm
->dpm
;
506 retval
= dpm
->prepare(dpm
);
507 if (retval
!= ERROR_OK
)
510 LOG_DEBUG("MRC p%d, %d, r0, c%d, c%d, %d", cpnum
,
511 (int) op1
, (int) CRn
,
512 (int) CRm
, (int) op2
);
514 /* read coprocessor register into R0; return via DCC */
515 retval
= dpm
->instr_read_data_r0(dpm
,
516 ARMV4_5_MRC(cpnum
, op1
, 0, CRn
, CRm
, op2
),
519 /* (void) */ dpm
->finish(dpm
);
523 static int dpmv8_mcr(struct target
*target
, int cpnum
,
524 uint32_t op1
, uint32_t op2
, uint32_t CRn
, uint32_t CRm
,
527 struct arm
*arm
= target_to_arm(target
);
528 struct arm_dpm
*dpm
= arm
->dpm
;
531 retval
= dpm
->prepare(dpm
);
532 if (retval
!= ERROR_OK
)
535 LOG_DEBUG("MCR p%d, %d, r0, c%d, c%d, %d", cpnum
,
536 (int) op1
, (int) CRn
,
537 (int) CRm
, (int) op2
);
539 /* read DCC into r0; then write coprocessor register from R0 */
540 retval
= dpm
->instr_write_data_r0(dpm
,
541 ARMV4_5_MCR(cpnum
, op1
, 0, CRn
, CRm
, op2
),
544 /* (void) */ dpm
->finish(dpm
);
548 /*----------------------------------------------------------------------*/
551 * Register access utilities
554 int armv8_dpm_modeswitch(struct arm_dpm
*dpm
, enum arm_mode mode
)
556 struct armv8_common
*armv8
= (struct armv8_common
*)dpm
->arm
->arch_info
;
557 int retval
= ERROR_OK
;
558 unsigned int target_el
;
559 enum arm_state core_state
;
562 /* restore previous mode */
563 if (mode
== ARM_MODE_ANY
) {
564 cpsr
= buf_get_u32(dpm
->arm
->cpsr
->value
, 0, 32);
566 LOG_DEBUG("restoring mode, cpsr = 0x%08"PRIx32
, cpsr
);
569 LOG_DEBUG("setting mode 0x%"PRIx32
, mode
);
571 /* else force to the specified mode */
572 if (is_arm_mode(mode
))
578 switch (cpsr
& 0x1f) {
590 * TODO: handle ARM_MODE_HYP
600 target_el
= (cpsr
>> 2) & 3;
603 if (target_el
> SYSTEM_CUREL_EL3
) {
604 LOG_ERROR("%s: Invalid target exception level %i", __func__
, target_el
);
608 LOG_DEBUG("target_el = %i, last_el = %i", target_el
, dpm
->last_el
);
609 if (target_el
> dpm
->last_el
) {
610 retval
= dpm
->instr_execute(dpm
,
611 armv8_opcode(armv8
, ARMV8_OPC_DCPS
) | target_el
);
613 /* DCPS clobbers registers just like an exception taken */
614 armv8_dpm_handle_exception(dpm
);
616 core_state
= armv8_dpm_get_core_state(dpm
);
617 if (core_state
!= ARM_STATE_AARCH64
) {
618 /* cannot do DRPS/ERET when already in EL0 */
619 if (dpm
->last_el
!= 0) {
620 /* load SPSR with the desired mode and execute DRPS */
621 LOG_DEBUG("SPSR = 0x%08"PRIx32
, cpsr
);
622 retval
= dpm
->instr_write_data_r0(dpm
,
623 ARMV8_MSR_GP_xPSR_T1(1, 0, 15), cpsr
);
624 if (retval
== ERROR_OK
)
625 retval
= dpm
->instr_execute(dpm
, armv8_opcode(armv8
, ARMV8_OPC_DRPS
));
629 * need to execute multiple DRPS instructions until target_el
632 while (retval
== ERROR_OK
&& dpm
->last_el
!= target_el
) {
633 unsigned int cur_el
= dpm
->last_el
;
634 retval
= dpm
->instr_execute(dpm
, armv8_opcode(armv8
, ARMV8_OPC_DRPS
));
635 if (cur_el
== dpm
->last_el
) {
636 LOG_INFO("Cannot reach EL %i, SPSR corrupted?", target_el
);
642 /* On executing DRPS, DSPSR and DLR become UNKNOWN, mark them as dirty */
643 dpm
->arm
->cpsr
->dirty
= true;
644 dpm
->arm
->pc
->dirty
= true;
647 * re-evaluate the core state, we might be in Aarch32 state now
648 * we rely on dpm->dscr being up-to-date
650 core_state
= armv8_dpm_get_core_state(dpm
);
651 armv8_select_opcodes(armv8
, core_state
== ARM_STATE_AARCH64
);
652 armv8_select_reg_access(armv8
, core_state
== ARM_STATE_AARCH64
);
659 * Common register read, relies on armv8_select_reg_access() having been called.
661 static int dpmv8_read_reg(struct arm_dpm
*dpm
, struct reg
*r
, unsigned regnum
)
663 struct armv8_common
*armv8
= dpm
->arm
->arch_info
;
667 retval
= armv8
->read_reg_u64(armv8
, regnum
, &value_64
);
669 if (retval
== ERROR_OK
) {
672 buf_set_u64(r
->value
, 0, r
->size
, value_64
);
674 LOG_DEBUG("READ: %s, %16.8llx", r
->name
, (unsigned long long) value_64
);
676 LOG_DEBUG("READ: %s, %8.8x", r
->name
, (unsigned int) value_64
);
682 * Common register write, relies on armv8_select_reg_access() having been called.
684 static int dpmv8_write_reg(struct arm_dpm
*dpm
, struct reg
*r
, unsigned regnum
)
686 struct armv8_common
*armv8
= dpm
->arm
->arch_info
;
687 int retval
= ERROR_FAIL
;
690 value_64
= buf_get_u64(r
->value
, 0, r
->size
);
692 retval
= armv8
->write_reg_u64(armv8
, regnum
, value_64
);
693 if (retval
== ERROR_OK
) {
696 LOG_DEBUG("WRITE: %s, %16.8llx", r
->name
, (unsigned long long)value_64
);
698 LOG_DEBUG("WRITE: %s, %8.8x", r
->name
, (unsigned int)value_64
);
705 * Read basic registers of the current context: R0 to R15, and CPSR;
706 * sets the core mode (such as USR or IRQ) and state (such as ARM or Thumb).
707 * In normal operation this is called on entry to halting debug state,
708 * possibly after some other operations supporting restore of debug state
709 * or making sure the CPU is fully idle (drain write buffer, etc).
711 int armv8_dpm_read_current_registers(struct arm_dpm
*dpm
)
713 struct arm
*arm
= dpm
->arm
;
714 struct armv8_common
*armv8
= (struct armv8_common
*)arm
->arch_info
;
715 struct reg_cache
*cache
;
720 retval
= dpm
->prepare(dpm
);
721 if (retval
!= ERROR_OK
)
724 cache
= arm
->core_cache
;
726 /* read R0 first (it's used for scratch), then CPSR */
727 r
= cache
->reg_list
+ 0;
729 retval
= dpmv8_read_reg(dpm
, r
, 0);
730 if (retval
!= ERROR_OK
)
735 /* read cpsr to r0 and get it back */
736 retval
= dpm
->instr_read_data_r0(dpm
,
737 armv8_opcode(armv8
, READ_REG_DSPSR
), &cpsr
);
738 if (retval
!= ERROR_OK
)
741 /* update core mode and state */
742 armv8_set_cpsr(arm
, cpsr
);
744 for (unsigned int i
= 1; i
< cache
->num_regs
; i
++) {
745 struct arm_reg
*arm_reg
;
747 r
= armv8_reg_current(arm
, i
);
752 * Only read registers that are available from the
753 * current EL (or core mode).
755 arm_reg
= r
->arch_info
;
756 if (arm_reg
->mode
!= ARM_MODE_ANY
&&
757 dpm
->last_el
!= armv8_curel_from_core_mode(arm_reg
->mode
))
760 retval
= dpmv8_read_reg(dpm
, r
, i
);
761 if (retval
!= ERROR_OK
)
771 /* Avoid needless I/O ... leave breakpoints and watchpoints alone
772 * unless they're removed, or need updating because of single-stepping
773 * or running debugger code.
775 static int dpmv8_maybe_update_bpwp(struct arm_dpm
*dpm
, bool bpwp
,
776 struct dpm_bpwp
*xp
, int *set_p
)
778 int retval
= ERROR_OK
;
785 /* removed or startup; we must disable it */
790 /* disabled, but we must set it */
791 xp
->dirty
= disable
= false;
796 /* set, but we must temporarily disable it */
797 xp
->dirty
= disable
= true;
802 retval
= dpm
->bpwp_disable(dpm
, xp
->number
);
804 retval
= dpm
->bpwp_enable(dpm
, xp
->number
,
805 xp
->address
, xp
->control
);
807 if (retval
!= ERROR_OK
)
808 LOG_ERROR("%s: can't %s HW %spoint %d",
809 disable
? "disable" : "enable",
810 target_name(dpm
->arm
->target
),
811 (xp
->number
< 16) ? "break" : "watch",
817 static int dpmv8_add_breakpoint(struct target
*target
, struct breakpoint
*bp
);
820 * Writes all modified core registers for all processor modes. In normal
821 * operation this is called on exit from halting debug state.
823 * @param dpm: represents the processor
824 * @param bpwp: true ensures breakpoints and watchpoints are set,
825 * false ensures they are cleared
827 int armv8_dpm_write_dirty_registers(struct arm_dpm
*dpm
, bool bpwp
)
829 struct arm
*arm
= dpm
->arm
;
830 struct reg_cache
*cache
= arm
->core_cache
;
833 retval
= dpm
->prepare(dpm
);
834 if (retval
!= ERROR_OK
)
837 /* If we're managing hardware breakpoints for this core, enable
838 * or disable them as requested.
840 * REVISIT We don't yet manage them for ANY cores. Eventually
841 * we should be able to assume we handle them; but until then,
842 * cope with the hand-crafted breakpoint code.
844 if (arm
->target
->type
->add_breakpoint
== dpmv8_add_breakpoint
) {
845 for (unsigned i
= 0; i
< dpm
->nbp
; i
++) {
846 struct dpm_bp
*dbp
= dpm
->dbp
+ i
;
847 struct breakpoint
*bp
= dbp
->bp
;
849 retval
= dpmv8_maybe_update_bpwp(dpm
, bpwp
, &dbp
->bpwp
,
850 bp
? &bp
->set
: NULL
);
851 if (retval
!= ERROR_OK
)
856 /* enable/disable watchpoints */
857 for (unsigned i
= 0; i
< dpm
->nwp
; i
++) {
858 struct dpm_wp
*dwp
= dpm
->dwp
+ i
;
859 struct watchpoint
*wp
= dwp
->wp
;
861 retval
= dpmv8_maybe_update_bpwp(dpm
, bpwp
, &dwp
->bpwp
,
862 wp
? &wp
->set
: NULL
);
863 if (retval
!= ERROR_OK
)
867 /* NOTE: writes to breakpoint and watchpoint registers might
868 * be queued, and need (efficient/batched) flushing later.
871 /* Restore original core mode and state */
872 retval
= armv8_dpm_modeswitch(dpm
, ARM_MODE_ANY
);
873 if (retval
!= ERROR_OK
)
876 /* check everything except our scratch register R0 */
877 for (unsigned i
= 1; i
< cache
->num_regs
; i
++) {
880 /* skip PC and CPSR */
881 if (i
== ARMV8_PC
|| i
== ARMV8_xPSR
)
884 if (!cache
->reg_list
[i
].valid
)
887 if (!cache
->reg_list
[i
].dirty
)
890 /* skip all registers not on the current EL */
891 r
= cache
->reg_list
[i
].arch_info
;
892 if (r
->mode
!= ARM_MODE_ANY
&&
893 dpm
->last_el
!= armv8_curel_from_core_mode(r
->mode
))
896 retval
= dpmv8_write_reg(dpm
, &cache
->reg_list
[i
], i
);
897 if (retval
!= ERROR_OK
)
901 /* flush CPSR and PC */
902 if (retval
== ERROR_OK
)
903 retval
= dpmv8_write_reg(dpm
, &cache
->reg_list
[ARMV8_xPSR
], ARMV8_xPSR
);
904 if (retval
== ERROR_OK
)
905 retval
= dpmv8_write_reg(dpm
, &cache
->reg_list
[ARMV8_PC
], ARMV8_PC
);
906 /* flush R0 -- it's *very* dirty by now */
907 if (retval
== ERROR_OK
)
908 retval
= dpmv8_write_reg(dpm
, &cache
->reg_list
[0], 0);
909 if (retval
== ERROR_OK
)
910 dpm
->instr_cpsr_sync(dpm
);
917 * Standard ARM register accessors ... there are three methods
918 * in "struct arm", to support individual read/write and bulk read
922 static int armv8_dpm_read_core_reg(struct target
*target
, struct reg
*r
,
923 int regnum
, enum arm_mode mode
)
925 struct arm
*arm
= target_to_arm(target
);
926 struct arm_dpm
*dpm
= target_to_arm(target
)->dpm
;
928 int max
= arm
->core_cache
->num_regs
;
930 if (regnum
< 0 || regnum
>= max
)
931 return ERROR_COMMAND_SYNTAX_ERROR
;
934 * REVISIT what happens if we try to read SPSR in a core mode
935 * which has no such register?
937 retval
= dpm
->prepare(dpm
);
938 if (retval
!= ERROR_OK
)
941 retval
= dpmv8_read_reg(dpm
, r
, regnum
);
942 if (retval
!= ERROR_OK
)
946 /* (void) */ dpm
->finish(dpm
);
950 static int armv8_dpm_write_core_reg(struct target
*target
, struct reg
*r
,
951 int regnum
, enum arm_mode mode
, uint8_t *value
)
953 struct arm
*arm
= target_to_arm(target
);
954 struct arm_dpm
*dpm
= target_to_arm(target
)->dpm
;
956 int max
= arm
->core_cache
->num_regs
;
958 if (regnum
< 0 || regnum
> max
)
959 return ERROR_COMMAND_SYNTAX_ERROR
;
961 /* REVISIT what happens if we try to write SPSR in a core mode
962 * which has no such register?
965 retval
= dpm
->prepare(dpm
);
966 if (retval
!= ERROR_OK
)
969 retval
= dpmv8_write_reg(dpm
, r
, regnum
);
971 /* always clean up, regardless of error */
977 static int armv8_dpm_full_context(struct target
*target
)
979 struct arm
*arm
= target_to_arm(target
);
980 struct arm_dpm
*dpm
= arm
->dpm
;
981 struct reg_cache
*cache
= arm
->core_cache
;
985 retval
= dpm
->prepare(dpm
);
986 if (retval
!= ERROR_OK
)
990 enum arm_mode mode
= ARM_MODE_ANY
;
994 /* We "know" arm_dpm_read_current_registers() was called so
995 * the unmapped registers (R0..R7, PC, AND CPSR) and some
996 * view of R8..R14 are current. We also "know" oddities of
997 * register mapping: special cases for R8..R12 and SPSR.
999 * Pick some mode with unread registers and read them all.
1000 * Repeat until done.
1002 for (unsigned i
= 0; i
< cache
->num_regs
; i
++) {
1005 if (cache
->reg_list
[i
].valid
)
1007 r
= cache
->reg_list
[i
].arch_info
;
1009 /* may need to pick a mode and set CPSR */
1014 /* For regular (ARM_MODE_ANY) R8..R12
1015 * in case we've entered debug state
1016 * in FIQ mode we need to patch mode.
1018 if (mode
!= ARM_MODE_ANY
)
1019 retval
= armv8_dpm_modeswitch(dpm
, mode
);
1021 retval
= armv8_dpm_modeswitch(dpm
, ARM_MODE_USR
);
1023 if (retval
!= ERROR_OK
)
1026 if (r
->mode
!= mode
)
1029 /* CPSR was read, so "R16" must mean SPSR */
1030 retval
= dpmv8_read_reg(dpm
,
1031 &cache
->reg_list
[i
],
1032 (r
->num
== 16) ? 17 : r
->num
);
1033 if (retval
!= ERROR_OK
)
1039 retval
= armv8_dpm_modeswitch(dpm
, ARM_MODE_ANY
);
1040 /* (void) */ dpm
->finish(dpm
);
1046 /*----------------------------------------------------------------------*/
1049 * Breakpoint and Watchpoint support.
1051 * Hardware {break,watch}points are usually left active, to minimize
1052 * debug entry/exit costs. When they are set or cleared, it's done in
1053 * batches. Also, DPM-conformant hardware can update debug registers
1054 * regardless of whether the CPU is running or halted ... though that
1055 * fact isn't currently leveraged.
1058 static int dpmv8_bpwp_setup(struct arm_dpm
*dpm
, struct dpm_bpwp
*xp
,
1059 uint32_t addr
, uint32_t length
)
1063 control
= (1 << 0) /* enable */
1064 | (3 << 1); /* both user and privileged access */
1066 /* Match 1, 2, or all 4 byte addresses in this word.
1068 * FIXME: v7 hardware allows lengths up to 2 GB for BP and WP.
1069 * Support larger length, when addr is suitably aligned. In
1070 * particular, allow watchpoints on 8 byte "double" values.
1072 * REVISIT allow watchpoints on unaligned 2-bit values; and on
1073 * v7 hardware, unaligned 4-byte ones too.
1077 control
|= (1 << (addr
& 3)) << 5;
1080 /* require 2-byte alignment */
1082 control
|= (3 << (addr
& 2)) << 5;
1087 /* require 4-byte alignment */
1089 control
|= 0xf << 5;
1094 LOG_ERROR("unsupported {break,watch}point length/alignment");
1095 return ERROR_COMMAND_SYNTAX_ERROR
;
1098 /* other shared control bits:
1099 * bits 15:14 == 0 ... both secure and nonsecure states (v6.1+ only)
1100 * bit 20 == 0 ... not linked to a context ID
1101 * bit 28:24 == 0 ... not ignoring N LSBs (v7 only)
1104 xp
->address
= addr
& ~3;
1105 xp
->control
= control
;
1108 LOG_DEBUG("BPWP: addr %8.8" PRIx32
", control %" PRIx32
", number %d",
1109 xp
->address
, control
, xp
->number
);
1111 /* hardware is updated in write_dirty_registers() */
1115 static int dpmv8_add_breakpoint(struct target
*target
, struct breakpoint
*bp
)
1117 struct arm
*arm
= target_to_arm(target
);
1118 struct arm_dpm
*dpm
= arm
->dpm
;
1119 int retval
= ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1122 return ERROR_COMMAND_SYNTAX_ERROR
;
1123 if (!dpm
->bpwp_enable
)
1126 /* FIXME we need a generic solution for software breakpoints. */
1127 if (bp
->type
== BKPT_SOFT
)
1128 LOG_DEBUG("using HW bkpt, not SW...");
1130 for (unsigned i
= 0; i
< dpm
->nbp
; i
++) {
1131 if (!dpm
->dbp
[i
].bp
) {
1132 retval
= dpmv8_bpwp_setup(dpm
, &dpm
->dbp
[i
].bpwp
,
1133 bp
->address
, bp
->length
);
1134 if (retval
== ERROR_OK
)
1135 dpm
->dbp
[i
].bp
= bp
;
1143 static int dpmv8_remove_breakpoint(struct target
*target
, struct breakpoint
*bp
)
1145 struct arm
*arm
= target_to_arm(target
);
1146 struct arm_dpm
*dpm
= arm
->dpm
;
1147 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1149 for (unsigned i
= 0; i
< dpm
->nbp
; i
++) {
1150 if (dpm
->dbp
[i
].bp
== bp
) {
1151 dpm
->dbp
[i
].bp
= NULL
;
1152 dpm
->dbp
[i
].bpwp
.dirty
= true;
1154 /* hardware is updated in write_dirty_registers() */
1163 static int dpmv8_watchpoint_setup(struct arm_dpm
*dpm
, unsigned index_t
,
1164 struct watchpoint
*wp
)
1167 struct dpm_wp
*dwp
= dpm
->dwp
+ index_t
;
1170 /* this hardware doesn't support data value matching or masking */
1171 if (wp
->value
|| wp
->mask
!= ~(uint32_t)0) {
1172 LOG_DEBUG("watchpoint values and masking not supported");
1173 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1176 retval
= dpmv8_bpwp_setup(dpm
, &dwp
->bpwp
, wp
->address
, wp
->length
);
1177 if (retval
!= ERROR_OK
)
1180 control
= dwp
->bpwp
.control
;
1192 dwp
->bpwp
.control
= control
;
1194 dpm
->dwp
[index_t
].wp
= wp
;
1199 static int dpmv8_add_watchpoint(struct target
*target
, struct watchpoint
*wp
)
1201 struct arm
*arm
= target_to_arm(target
);
1202 struct arm_dpm
*dpm
= arm
->dpm
;
1203 int retval
= ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1205 if (dpm
->bpwp_enable
) {
1206 for (unsigned i
= 0; i
< dpm
->nwp
; i
++) {
1207 if (!dpm
->dwp
[i
].wp
) {
1208 retval
= dpmv8_watchpoint_setup(dpm
, i
, wp
);
1217 static int dpmv8_remove_watchpoint(struct target
*target
, struct watchpoint
*wp
)
1219 struct arm
*arm
= target_to_arm(target
);
1220 struct arm_dpm
*dpm
= arm
->dpm
;
1221 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1223 for (unsigned i
= 0; i
< dpm
->nwp
; i
++) {
1224 if (dpm
->dwp
[i
].wp
== wp
) {
1225 dpm
->dwp
[i
].wp
= NULL
;
1226 dpm
->dwp
[i
].bpwp
.dirty
= true;
1228 /* hardware is updated in write_dirty_registers() */
1237 void armv8_dpm_report_wfar(struct arm_dpm
*dpm
, uint64_t addr
)
1239 switch (dpm
->arm
->core_state
) {
1241 case ARM_STATE_AARCH64
:
1244 case ARM_STATE_THUMB
:
1245 case ARM_STATE_THUMB_EE
:
1248 case ARM_STATE_JAZELLE
:
1252 LOG_DEBUG("Unknown core_state");
1259 * Handle exceptions taken in debug state. This happens mostly for memory
1260 * accesses that violated a MMU policy. Taking an exception while in debug
1261 * state clobbers certain state registers on the target exception level.
1262 * Just mark those registers dirty so that they get restored on resume.
1263 * This works both for Aarch32 and Aarch64 states.
1265 * This function must not perform any actions that trigger another exception
1266 * or a recursion will happen.
1268 void armv8_dpm_handle_exception(struct arm_dpm
*dpm
)
1270 struct armv8_common
*armv8
= dpm
->arm
->arch_info
;
1271 struct reg_cache
*cache
= dpm
->arm
->core_cache
;
1272 enum arm_state core_state
;
1277 static const int clobbered_regs_by_el
[3][5] = {
1278 { ARMV8_PC
, ARMV8_xPSR
, ARMV8_ELR_EL1
, ARMV8_ESR_EL1
, ARMV8_SPSR_EL1
},
1279 { ARMV8_PC
, ARMV8_xPSR
, ARMV8_ELR_EL2
, ARMV8_ESR_EL2
, ARMV8_SPSR_EL2
},
1280 { ARMV8_PC
, ARMV8_xPSR
, ARMV8_ELR_EL3
, ARMV8_ESR_EL3
, ARMV8_SPSR_EL3
},
1283 el
= (dpm
->dscr
>> 8) & 3;
1285 /* safety check, must not happen since EL0 cannot be a target for an exception */
1286 if (el
< SYSTEM_CUREL_EL1
|| el
> SYSTEM_CUREL_EL3
) {
1287 LOG_ERROR("%s: EL %i is invalid, DSCR corrupted?", __func__
, el
);
1291 /* Clear sticky error */
1292 mem_ap_write_u32(armv8
->debug_ap
,
1293 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
1295 armv8
->read_reg_u64(armv8
, ARMV8_xPSR
, &dlr
);
1297 armv8
->read_reg_u64(armv8
, ARMV8_PC
, &dlr
);
1299 LOG_DEBUG("Exception taken to EL %i, DLR=0x%016"PRIx64
" DSPSR=0x%08"PRIx32
,
1302 /* mark all clobbered registers as dirty */
1303 for (int i
= 0; i
< 5; i
++)
1304 cache
->reg_list
[clobbered_regs_by_el
[el
-1][i
]].dirty
= true;
1307 * re-evaluate the core state, we might be in Aarch64 state now
1308 * we rely on dpm->dscr being up-to-date
1310 core_state
= armv8_dpm_get_core_state(dpm
);
1311 armv8_select_opcodes(armv8
, core_state
== ARM_STATE_AARCH64
);
1312 armv8_select_reg_access(armv8
, core_state
== ARM_STATE_AARCH64
);
1315 /*----------------------------------------------------------------------*/
1318 * Other debug and support utilities
1321 void armv8_dpm_report_dscr(struct arm_dpm
*dpm
, uint32_t dscr
)
1323 struct target
*target
= dpm
->arm
->target
;
1326 dpm
->last_el
= (dscr
>> 8) & 3;
1328 /* Examine debug reason */
1329 switch (DSCR_ENTRY(dscr
)) {
1330 /* FALL THROUGH -- assume a v6 core in abort mode */
1331 case DSCRV8_ENTRY_EXT_DEBUG
: /* EDBGRQ */
1332 target
->debug_reason
= DBG_REASON_DBGRQ
;
1334 case DSCRV8_ENTRY_HALT_STEP_EXECLU
: /* HALT step */
1335 case DSCRV8_ENTRY_HALT_STEP_NORMAL
: /* Halt step*/
1336 case DSCRV8_ENTRY_HALT_STEP
:
1337 target
->debug_reason
= DBG_REASON_SINGLESTEP
;
1339 case DSCRV8_ENTRY_HLT
: /* HLT instruction (software breakpoint) */
1340 case DSCRV8_ENTRY_BKPT
: /* SW BKPT (?) */
1341 case DSCRV8_ENTRY_RESET_CATCH
: /* Reset catch */
1342 case DSCRV8_ENTRY_OS_UNLOCK
: /*OS unlock catch*/
1343 case DSCRV8_ENTRY_EXCEPTION_CATCH
: /*exception catch*/
1344 case DSCRV8_ENTRY_SW_ACCESS_DBG
: /*SW access dbg register*/
1345 target
->debug_reason
= DBG_REASON_BREAKPOINT
;
1347 case DSCRV8_ENTRY_WATCHPOINT
: /* asynch watchpoint */
1348 target
->debug_reason
= DBG_REASON_WATCHPOINT
;
1351 target
->debug_reason
= DBG_REASON_UNDEFINED
;
1357 /*----------------------------------------------------------------------*/
1360 * Setup and management support.
1364 * Hooks up this DPM to its associated target; call only once.
1365 * Initially this only covers the register cache.
1367 * Oh, and watchpoints. Yeah.
1369 int armv8_dpm_setup(struct arm_dpm
*dpm
)
1371 struct arm
*arm
= dpm
->arm
;
1372 struct target
*target
= arm
->target
;
1373 struct reg_cache
*cache
;
1376 /* register access setup */
1377 arm
->full_context
= armv8_dpm_full_context
;
1378 arm
->read_core_reg
= armv8_dpm_read_core_reg
;
1379 arm
->write_core_reg
= armv8_dpm_write_core_reg
;
1381 if (arm
->core_cache
== NULL
) {
1382 cache
= armv8_build_reg_cache(target
);
1387 /* coprocessor access setup */
1388 arm
->mrc
= dpmv8_mrc
;
1389 arm
->mcr
= dpmv8_mcr
;
1391 dpm
->prepare
= dpmv8_dpm_prepare
;
1392 dpm
->finish
= dpmv8_dpm_finish
;
1394 dpm
->instr_execute
= dpmv8_instr_execute
;
1395 dpm
->instr_write_data_dcc
= dpmv8_instr_write_data_dcc
;
1396 dpm
->instr_write_data_dcc_64
= dpmv8_instr_write_data_dcc_64
;
1397 dpm
->instr_write_data_r0
= dpmv8_instr_write_data_r0
;
1398 dpm
->instr_write_data_r0_64
= dpmv8_instr_write_data_r0_64
;
1399 dpm
->instr_cpsr_sync
= dpmv8_instr_cpsr_sync
;
1401 dpm
->instr_read_data_dcc
= dpmv8_instr_read_data_dcc
;
1402 dpm
->instr_read_data_dcc_64
= dpmv8_instr_read_data_dcc_64
;
1403 dpm
->instr_read_data_r0
= dpmv8_instr_read_data_r0
;
1404 dpm
->instr_read_data_r0_64
= dpmv8_instr_read_data_r0_64
;
1406 dpm
->arm_reg_current
= armv8_reg_current
;
1408 /* dpm->bpwp_enable = dpmv8_bpwp_enable; */
1409 dpm
->bpwp_disable
= dpmv8_bpwp_disable
;
1411 /* breakpoint setup -- optional until it works everywhere */
1412 if (!target
->type
->add_breakpoint
) {
1413 target
->type
->add_breakpoint
= dpmv8_add_breakpoint
;
1414 target
->type
->remove_breakpoint
= dpmv8_remove_breakpoint
;
1417 /* watchpoint setup */
1418 target
->type
->add_watchpoint
= dpmv8_add_watchpoint
;
1419 target
->type
->remove_watchpoint
= dpmv8_remove_watchpoint
;
1421 /* FIXME add vector catch support */
1423 dpm
->nbp
= 1 + ((dpm
->didr
>> 12) & 0xf);
1424 dpm
->dbp
= calloc(dpm
->nbp
, sizeof *dpm
->dbp
);
1426 dpm
->nwp
= 1 + ((dpm
->didr
>> 20) & 0xf);
1427 dpm
->dwp
= calloc(dpm
->nwp
, sizeof *dpm
->dwp
);
1429 if (!dpm
->dbp
|| !dpm
->dwp
) {
1435 LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
1436 target_name(target
), dpm
->nbp
, dpm
->nwp
);
1438 /* REVISIT ... and some of those breakpoints could match
1439 * execution context IDs...
1446 * Reinitializes DPM state at the beginning of a new debug session
1447 * or after a reset which may have affected the debug module.
1449 int armv8_dpm_initialize(struct arm_dpm
*dpm
)
1451 /* Disable all breakpoints and watchpoints at startup. */
1452 if (dpm
->bpwp_disable
) {
1455 for (i
= 0; i
< dpm
->nbp
; i
++) {
1456 dpm
->dbp
[i
].bpwp
.number
= i
;
1457 (void) dpm
->bpwp_disable(dpm
, i
);
1459 for (i
= 0; i
< dpm
->nwp
; i
++) {
1460 dpm
->dwp
[i
].bpwp
.number
= 16 + i
;
1461 (void) dpm
->bpwp_disable(dpm
, 16 + i
);
1464 LOG_WARNING("%s: can't disable breakpoints and watchpoints",
1465 target_name(dpm
->arm
->target
));
Linking to existing account procedure
If you already have an account and want to add another login method
you
MUST first sign in with your existing account and
then change URL to read
https://review.openocd.org/login/?link
to get to this page again but this time it'll work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)