1 // SPDX-License-Identifier: GPL-2.0-or-later
3 /***************************************************************************
4 * Copyright (C) 2015 by David Ung *
6 ***************************************************************************/
12 #include "breakpoints.h"
14 #include "a64_disassembler.h"
16 #include "target_request.h"
17 #include "target_type.h"
18 #include "armv8_opcodes.h"
19 #include "armv8_cache.h"
20 #include "arm_coresight.h"
21 #include "arm_semihosting.h"
22 #include "jtag/interface.h"
24 #include <helper/nvp.h>
25 #include <helper/time_support.h>
37 struct aarch64_private_config
{
38 struct adiv5_private_config adiv5_config
;
42 static int aarch64_poll(struct target
*target
);
43 static int aarch64_debug_entry(struct target
*target
);
44 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
45 static int aarch64_set_breakpoint(struct target
*target
,
46 struct breakpoint
*breakpoint
, uint8_t matchmode
);
47 static int aarch64_set_context_breakpoint(struct target
*target
,
48 struct breakpoint
*breakpoint
, uint8_t matchmode
);
49 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
50 struct breakpoint
*breakpoint
);
51 static int aarch64_unset_breakpoint(struct target
*target
,
52 struct breakpoint
*breakpoint
);
53 static int aarch64_mmu(struct target
*target
, int *enabled
);
54 static int aarch64_virt2phys(struct target
*target
,
55 target_addr_t virt
, target_addr_t
*phys
);
56 static int aarch64_read_cpu_memory(struct target
*target
,
57 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
59 static int aarch64_restore_system_control_reg(struct target
*target
)
61 enum arm_mode target_mode
= ARM_MODE_ANY
;
62 int retval
= ERROR_OK
;
65 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
66 struct armv8_common
*armv8
= target_to_armv8(target
);
68 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
69 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
70 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
72 switch (armv8
->arm
.core_mode
) {
74 target_mode
= ARMV8_64_EL1H
;
78 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
82 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
86 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
96 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
100 LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
101 armv8_mode_name(armv8
->arm
.core_mode
), armv8
->arm
.core_mode
);
105 if (target_mode
!= ARM_MODE_ANY
)
106 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
108 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
, aarch64
->system_control_reg
);
109 if (retval
!= ERROR_OK
)
112 if (target_mode
!= ARM_MODE_ANY
)
113 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
119 /* modify system_control_reg in order to enable or disable mmu for :
120 * - virt2phys address conversion
121 * - read or write memory in phys or virt address */
122 static int aarch64_mmu_modify(struct target
*target
, int enable
)
124 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
125 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
126 int retval
= ERROR_OK
;
127 enum arm_mode target_mode
= ARM_MODE_ANY
;
131 /* if mmu enabled at target stop and mmu not enable */
132 if (!(aarch64
->system_control_reg
& 0x1U
)) {
133 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
136 if (!(aarch64
->system_control_reg_curr
& 0x1U
))
137 aarch64
->system_control_reg_curr
|= 0x1U
;
139 if (aarch64
->system_control_reg_curr
& 0x4U
) {
140 /* data cache is active */
141 aarch64
->system_control_reg_curr
&= ~0x4U
;
142 /* flush data cache armv8 function to be called */
143 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
144 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
146 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
147 aarch64
->system_control_reg_curr
&= ~0x1U
;
151 switch (armv8
->arm
.core_mode
) {
153 target_mode
= ARMV8_64_EL1H
;
157 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
161 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
165 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
175 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
179 LOG_DEBUG("unknown cpu state 0x%x", armv8
->arm
.core_mode
);
182 if (target_mode
!= ARM_MODE_ANY
)
183 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
185 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
,
186 aarch64
->system_control_reg_curr
);
188 if (target_mode
!= ARM_MODE_ANY
)
189 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
195 * Basic debug access, very low level assumes state is saved
197 static int aarch64_init_debug_access(struct target
*target
)
199 struct armv8_common
*armv8
= target_to_armv8(target
);
203 LOG_DEBUG("%s", target_name(target
));
205 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
206 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
207 if (retval
!= ERROR_OK
) {
208 LOG_DEBUG("Examine %s failed", "oslock");
212 /* Clear Sticky Power Down status Bit in PRSR to enable access to
213 the registers in the Core Power Domain */
214 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
215 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
216 if (retval
!= ERROR_OK
)
220 * Static CTI configuration:
221 * Channel 0 -> trigger outputs HALT request to PE
222 * Channel 1 -> trigger outputs Resume request to PE
223 * Gate all channel trigger events from entering the CTM
227 retval
= arm_cti_enable(armv8
->cti
, true);
228 /* By default, gate all channel events to and from the CTM */
229 if (retval
== ERROR_OK
)
230 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
231 /* output halt requests to PE on channel 0 event */
232 if (retval
== ERROR_OK
)
233 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN0
, CTI_CHNL(0));
234 /* output restart requests to PE on channel 1 event */
235 if (retval
== ERROR_OK
)
236 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN1
, CTI_CHNL(1));
237 if (retval
!= ERROR_OK
)
240 /* Resync breakpoint registers */
245 /* Write to memory mapped registers directly with no cache or mmu handling */
246 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
247 target_addr_t address
,
251 struct armv8_common
*armv8
= target_to_armv8(target
);
253 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
258 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint64_t debug
)
260 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
263 dpm
->arm
= &a8
->armv8_common
.arm
;
266 retval
= armv8_dpm_setup(dpm
);
267 if (retval
== ERROR_OK
)
268 retval
= armv8_dpm_initialize(dpm
);
273 static int aarch64_set_dscr_bits(struct target
*target
, unsigned long bit_mask
, unsigned long value
)
275 struct armv8_common
*armv8
= target_to_armv8(target
);
276 return armv8_set_dbgreg_bits(armv8
, CPUV8_DBG_DSCR
, bit_mask
, value
);
279 static int aarch64_check_state_one(struct target
*target
,
280 uint32_t mask
, uint32_t val
, int *p_result
, uint32_t *p_prsr
)
282 struct armv8_common
*armv8
= target_to_armv8(target
);
286 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
287 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &prsr
);
288 if (retval
!= ERROR_OK
)
295 *p_result
= (prsr
& mask
) == (val
& mask
);
300 static int aarch64_wait_halt_one(struct target
*target
)
302 int retval
= ERROR_OK
;
305 int64_t then
= timeval_ms();
309 retval
= aarch64_check_state_one(target
, PRSR_HALT
, PRSR_HALT
, &halted
, &prsr
);
310 if (retval
!= ERROR_OK
|| halted
)
313 if (timeval_ms() > then
+ 1000) {
314 retval
= ERROR_TARGET_TIMEOUT
;
315 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32
, target_name(target
), prsr
);
322 static int aarch64_prepare_halt_smp(struct target
*target
, bool exc_target
, struct target
**p_first
)
324 int retval
= ERROR_OK
;
325 struct target_list
*head
;
326 struct target
*first
= NULL
;
328 LOG_DEBUG("target %s exc %i", target_name(target
), exc_target
);
330 foreach_smp_target(head
, target
->smp_targets
) {
331 struct target
*curr
= head
->target
;
332 struct armv8_common
*armv8
= target_to_armv8(curr
);
334 if (exc_target
&& curr
== target
)
336 if (!target_was_examined(curr
))
338 if (curr
->state
!= TARGET_RUNNING
)
341 /* HACK: mark this target as prepared for halting */
342 curr
->debug_reason
= DBG_REASON_DBGRQ
;
344 /* open the gate for channel 0 to let HALT requests pass to the CTM */
345 retval
= arm_cti_ungate_channel(armv8
->cti
, 0);
346 if (retval
== ERROR_OK
)
347 retval
= aarch64_set_dscr_bits(curr
, DSCR_HDE
, DSCR_HDE
);
348 if (retval
!= ERROR_OK
)
351 LOG_DEBUG("target %s prepared", target_name(curr
));
358 if (exc_target
&& first
)
367 static int aarch64_halt_one(struct target
*target
, enum halt_mode mode
)
369 int retval
= ERROR_OK
;
370 struct armv8_common
*armv8
= target_to_armv8(target
);
372 LOG_DEBUG("%s", target_name(target
));
374 /* allow Halting Debug Mode */
375 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
376 if (retval
!= ERROR_OK
)
379 /* trigger an event on channel 0, this outputs a halt request to the PE */
380 retval
= arm_cti_pulse_channel(armv8
->cti
, 0);
381 if (retval
!= ERROR_OK
)
384 if (mode
== HALT_SYNC
) {
385 retval
= aarch64_wait_halt_one(target
);
386 if (retval
!= ERROR_OK
) {
387 if (retval
== ERROR_TARGET_TIMEOUT
)
388 LOG_ERROR("Timeout waiting for target %s halt", target_name(target
));
396 static int aarch64_halt_smp(struct target
*target
, bool exc_target
)
398 struct target
*next
= target
;
401 /* prepare halt on all PEs of the group */
402 retval
= aarch64_prepare_halt_smp(target
, exc_target
, &next
);
404 if (exc_target
&& next
== target
)
407 /* halt the target PE */
408 if (retval
== ERROR_OK
)
409 retval
= aarch64_halt_one(next
, HALT_LAZY
);
411 if (retval
!= ERROR_OK
)
414 /* wait for all PEs to halt */
415 int64_t then
= timeval_ms();
417 bool all_halted
= true;
418 struct target_list
*head
;
421 foreach_smp_target(head
, target
->smp_targets
) {
426 if (!target_was_examined(curr
))
429 retval
= aarch64_check_state_one(curr
, PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
430 if (retval
!= ERROR_OK
|| !halted
) {
439 if (timeval_ms() > then
+ 1000) {
440 retval
= ERROR_TARGET_TIMEOUT
;
445 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
446 * and it looks like the CTI's are not connected by a common
447 * trigger matrix. It seems that we need to halt one core in each
448 * cluster explicitly. So if we find that a core has not halted
449 * yet, we trigger an explicit halt for the second cluster.
451 retval
= aarch64_halt_one(curr
, HALT_LAZY
);
452 if (retval
!= ERROR_OK
)
459 static int update_halt_gdb(struct target
*target
, enum target_debug_reason debug_reason
)
461 struct target
*gdb_target
= NULL
;
462 struct target_list
*head
;
465 if (debug_reason
== DBG_REASON_NOTHALTED
) {
466 LOG_DEBUG("Halting remaining targets in SMP group");
467 aarch64_halt_smp(target
, true);
470 /* poll all targets in the group, but skip the target that serves GDB */
471 foreach_smp_target(head
, target
->smp_targets
) {
473 /* skip calling context */
476 if (!target_was_examined(curr
))
478 /* skip targets that were already halted */
479 if (curr
->state
== TARGET_HALTED
)
481 /* remember the gdb_service->target */
482 if (curr
->gdb_service
)
483 gdb_target
= curr
->gdb_service
->target
;
485 if (curr
== gdb_target
)
488 /* avoid recursion in aarch64_poll() */
494 /* after all targets were updated, poll the gdb serving target */
495 if (gdb_target
&& gdb_target
!= target
)
496 aarch64_poll(gdb_target
);
502 * Aarch64 Run control
505 static int aarch64_poll(struct target
*target
)
507 enum target_state prev_target_state
;
508 int retval
= ERROR_OK
;
511 retval
= aarch64_check_state_one(target
,
512 PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
513 if (retval
!= ERROR_OK
)
517 prev_target_state
= target
->state
;
518 if (prev_target_state
!= TARGET_HALTED
) {
519 enum target_debug_reason debug_reason
= target
->debug_reason
;
521 /* We have a halting debug event */
522 target
->state
= TARGET_HALTED
;
523 LOG_DEBUG("Target %s halted", target_name(target
));
524 retval
= aarch64_debug_entry(target
);
525 if (retval
!= ERROR_OK
)
529 update_halt_gdb(target
, debug_reason
);
531 if (arm_semihosting(target
, &retval
) != 0)
534 switch (prev_target_state
) {
538 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
540 case TARGET_DEBUG_RUNNING
:
541 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_HALTED
);
548 target
->state
= TARGET_RUNNING
;
553 static int aarch64_halt(struct target
*target
)
555 struct armv8_common
*armv8
= target_to_armv8(target
);
556 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_HALT
;
559 return aarch64_halt_smp(target
, false);
561 return aarch64_halt_one(target
, HALT_SYNC
);
564 static int aarch64_restore_one(struct target
*target
, int current
,
565 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
567 struct armv8_common
*armv8
= target_to_armv8(target
);
568 struct arm
*arm
= &armv8
->arm
;
572 LOG_DEBUG("%s", target_name(target
));
574 if (!debug_execution
)
575 target_free_all_working_areas(target
);
577 /* current = 1: continue on current pc, otherwise continue at <address> */
578 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
580 resume_pc
= *address
;
582 *address
= resume_pc
;
584 /* Make sure that the Armv7 gdb thumb fixups does not
585 * kill the return address
587 switch (arm
->core_state
) {
589 resume_pc
&= 0xFFFFFFFC;
591 case ARM_STATE_AARCH64
:
592 resume_pc
&= 0xFFFFFFFFFFFFFFFCULL
;
594 case ARM_STATE_THUMB
:
595 case ARM_STATE_THUMB_EE
:
596 /* When the return address is loaded into PC
597 * bit 0 must be 1 to stay in Thumb state
601 case ARM_STATE_JAZELLE
:
602 LOG_ERROR("How do I resume into Jazelle state??");
605 LOG_DEBUG("resume pc = 0x%016" PRIx64
, resume_pc
);
606 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
607 arm
->pc
->dirty
= true;
608 arm
->pc
->valid
= true;
610 /* called it now before restoring context because it uses cpu
611 * register r0 for restoring system control register */
612 retval
= aarch64_restore_system_control_reg(target
);
613 if (retval
== ERROR_OK
)
614 retval
= aarch64_restore_context(target
, handle_breakpoints
);
620 * prepare single target for restart
624 static int aarch64_prepare_restart_one(struct target
*target
)
626 struct armv8_common
*armv8
= target_to_armv8(target
);
631 LOG_DEBUG("%s", target_name(target
));
633 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
634 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
635 if (retval
!= ERROR_OK
)
638 if ((dscr
& DSCR_ITE
) == 0)
639 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
640 if ((dscr
& DSCR_ERR
) != 0)
641 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
643 /* acknowledge a pending CTI halt event */
644 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
646 * open the CTI gate for channel 1 so that the restart events
647 * get passed along to all PEs. Also close gate for channel 0
648 * to isolate the PE from halt events.
650 if (retval
== ERROR_OK
)
651 retval
= arm_cti_ungate_channel(armv8
->cti
, 1);
652 if (retval
== ERROR_OK
)
653 retval
= arm_cti_gate_channel(armv8
->cti
, 0);
655 /* make sure that DSCR.HDE is set */
656 if (retval
== ERROR_OK
) {
658 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
659 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
662 if (retval
== ERROR_OK
) {
663 /* clear sticky bits in PRSR, SDR is now 0 */
664 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
665 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &tmp
);
671 static int aarch64_do_restart_one(struct target
*target
, enum restart_mode mode
)
673 struct armv8_common
*armv8
= target_to_armv8(target
);
676 LOG_DEBUG("%s", target_name(target
));
678 /* trigger an event on channel 1, generates a restart request to the PE */
679 retval
= arm_cti_pulse_channel(armv8
->cti
, 1);
680 if (retval
!= ERROR_OK
)
683 if (mode
== RESTART_SYNC
) {
684 int64_t then
= timeval_ms();
688 * if PRSR.SDR is set now, the target did restart, even
689 * if it's now already halted again (e.g. due to breakpoint)
691 retval
= aarch64_check_state_one(target
,
692 PRSR_SDR
, PRSR_SDR
, &resumed
, NULL
);
693 if (retval
!= ERROR_OK
|| resumed
)
696 if (timeval_ms() > then
+ 1000) {
697 LOG_ERROR("%s: Timeout waiting for resume"PRIx32
, target_name(target
));
698 retval
= ERROR_TARGET_TIMEOUT
;
704 if (retval
!= ERROR_OK
)
707 target
->debug_reason
= DBG_REASON_NOTHALTED
;
708 target
->state
= TARGET_RUNNING
;
713 static int aarch64_restart_one(struct target
*target
, enum restart_mode mode
)
717 LOG_DEBUG("%s", target_name(target
));
719 retval
= aarch64_prepare_restart_one(target
);
720 if (retval
== ERROR_OK
)
721 retval
= aarch64_do_restart_one(target
, mode
);
727 * prepare all but the current target for restart
729 static int aarch64_prep_restart_smp(struct target
*target
, int handle_breakpoints
, struct target
**p_first
)
731 int retval
= ERROR_OK
;
732 struct target_list
*head
;
733 struct target
*first
= NULL
;
736 foreach_smp_target(head
, target
->smp_targets
) {
737 struct target
*curr
= head
->target
;
739 /* skip calling target */
742 if (!target_was_examined(curr
))
744 if (curr
->state
!= TARGET_HALTED
)
747 /* resume at current address, not in step mode */
748 retval
= aarch64_restore_one(curr
, 1, &address
, handle_breakpoints
, 0);
749 if (retval
== ERROR_OK
)
750 retval
= aarch64_prepare_restart_one(curr
);
751 if (retval
!= ERROR_OK
) {
752 LOG_ERROR("failed to restore target %s", target_name(curr
));
755 /* remember the first valid target in the group */
767 static int aarch64_step_restart_smp(struct target
*target
)
769 int retval
= ERROR_OK
;
770 struct target_list
*head
;
771 struct target
*first
= NULL
;
773 LOG_DEBUG("%s", target_name(target
));
775 retval
= aarch64_prep_restart_smp(target
, 0, &first
);
776 if (retval
!= ERROR_OK
)
780 retval
= aarch64_do_restart_one(first
, RESTART_LAZY
);
781 if (retval
!= ERROR_OK
) {
782 LOG_DEBUG("error restarting target %s", target_name(first
));
786 int64_t then
= timeval_ms();
788 struct target
*curr
= target
;
789 bool all_resumed
= true;
791 foreach_smp_target(head
, target
->smp_targets
) {
800 if (!target_was_examined(curr
))
803 retval
= aarch64_check_state_one(curr
,
804 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
805 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
810 if (curr
->state
!= TARGET_RUNNING
) {
811 curr
->state
= TARGET_RUNNING
;
812 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
813 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
820 if (timeval_ms() > then
+ 1000) {
821 LOG_ERROR("%s: timeout waiting for target resume", __func__
);
822 retval
= ERROR_TARGET_TIMEOUT
;
826 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
827 * and it looks like the CTI's are not connected by a common
828 * trigger matrix. It seems that we need to halt one core in each
829 * cluster explicitly. So if we find that a core has not halted
830 * yet, we trigger an explicit resume for the second cluster.
832 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
833 if (retval
!= ERROR_OK
)
840 static int aarch64_resume(struct target
*target
, int current
,
841 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
844 uint64_t addr
= address
;
846 struct armv8_common
*armv8
= target_to_armv8(target
);
847 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_RESUME
;
849 if (target
->state
!= TARGET_HALTED
)
850 return ERROR_TARGET_NOT_HALTED
;
853 * If this target is part of a SMP group, prepare the others
854 * targets for resuming. This involves restoring the complete
855 * target register context and setting up CTI gates to accept
856 * resume events from the trigger matrix.
859 retval
= aarch64_prep_restart_smp(target
, handle_breakpoints
, NULL
);
860 if (retval
!= ERROR_OK
)
864 /* all targets prepared, restore and restart the current target */
865 retval
= aarch64_restore_one(target
, current
, &addr
, handle_breakpoints
,
867 if (retval
== ERROR_OK
)
868 retval
= aarch64_restart_one(target
, RESTART_SYNC
);
869 if (retval
!= ERROR_OK
)
873 int64_t then
= timeval_ms();
875 struct target
*curr
= target
;
876 struct target_list
*head
;
877 bool all_resumed
= true;
879 foreach_smp_target(head
, target
->smp_targets
) {
886 if (!target_was_examined(curr
))
889 retval
= aarch64_check_state_one(curr
,
890 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
891 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
896 if (curr
->state
!= TARGET_RUNNING
) {
897 curr
->state
= TARGET_RUNNING
;
898 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
899 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
906 if (timeval_ms() > then
+ 1000) {
907 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__
, target_name(curr
));
908 retval
= ERROR_TARGET_TIMEOUT
;
913 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
914 * and it looks like the CTI's are not connected by a common
915 * trigger matrix. It seems that we need to halt one core in each
916 * cluster explicitly. So if we find that a core has not halted
917 * yet, we trigger an explicit resume for the second cluster.
919 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
920 if (retval
!= ERROR_OK
)
925 if (retval
!= ERROR_OK
)
928 target
->debug_reason
= DBG_REASON_NOTHALTED
;
930 if (!debug_execution
) {
931 target
->state
= TARGET_RUNNING
;
932 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
933 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
935 target
->state
= TARGET_DEBUG_RUNNING
;
936 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
937 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
943 static int aarch64_debug_entry(struct target
*target
)
945 int retval
= ERROR_OK
;
946 struct armv8_common
*armv8
= target_to_armv8(target
);
947 struct arm_dpm
*dpm
= &armv8
->dpm
;
948 enum arm_state core_state
;
951 /* make sure to clear all sticky errors */
952 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
953 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
954 if (retval
== ERROR_OK
)
955 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
956 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
957 if (retval
== ERROR_OK
)
958 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
960 if (retval
!= ERROR_OK
)
963 LOG_DEBUG("%s dscr = 0x%08" PRIx32
, target_name(target
), dscr
);
966 core_state
= armv8_dpm_get_core_state(dpm
);
967 armv8_select_opcodes(armv8
, core_state
== ARM_STATE_AARCH64
);
968 armv8_select_reg_access(armv8
, core_state
== ARM_STATE_AARCH64
);
970 /* close the CTI gate for all events */
971 if (retval
== ERROR_OK
)
972 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
973 /* discard async exceptions */
974 if (retval
== ERROR_OK
)
975 retval
= dpm
->instr_cpsr_sync(dpm
);
976 if (retval
!= ERROR_OK
)
979 /* Examine debug reason */
980 armv8_dpm_report_dscr(dpm
, dscr
);
982 /* save the memory address that triggered the watchpoint */
983 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
986 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
987 armv8
->debug_base
+ CPUV8_DBG_EDWAR0
, &tmp
);
988 if (retval
!= ERROR_OK
)
990 target_addr_t edwar
= tmp
;
992 /* EDWAR[63:32] has unknown content in aarch32 state */
993 if (core_state
== ARM_STATE_AARCH64
) {
994 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
995 armv8
->debug_base
+ CPUV8_DBG_EDWAR1
, &tmp
);
996 if (retval
!= ERROR_OK
)
998 edwar
|= ((target_addr_t
)tmp
) << 32;
1001 armv8
->dpm
.wp_addr
= edwar
;
1004 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
1006 if (retval
== ERROR_OK
&& armv8
->post_debug_entry
)
1007 retval
= armv8
->post_debug_entry(target
);
1012 static int aarch64_post_debug_entry(struct target
*target
)
1014 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1015 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1017 enum arm_mode target_mode
= ARM_MODE_ANY
;
1020 switch (armv8
->arm
.core_mode
) {
1022 target_mode
= ARMV8_64_EL1H
;
1026 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL1
, 0);
1030 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL2
, 0);
1034 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL3
, 0);
1044 instr
= ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1048 LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
1049 armv8_mode_name(armv8
->arm
.core_mode
), armv8
->arm
.core_mode
);
1053 if (target_mode
!= ARM_MODE_ANY
)
1054 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
1056 retval
= armv8
->dpm
.instr_read_data_r0(&armv8
->dpm
, instr
, &aarch64
->system_control_reg
);
1057 if (retval
!= ERROR_OK
)
1060 if (target_mode
!= ARM_MODE_ANY
)
1061 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
1063 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
1064 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
1066 if (armv8
->armv8_mmu
.armv8_cache
.info
== -1) {
1067 armv8_identify_cache(armv8
);
1068 armv8_read_mpidr(armv8
);
1070 if (armv8
->is_armv8r
) {
1071 armv8
->armv8_mmu
.mmu_enabled
= 0;
1073 armv8
->armv8_mmu
.mmu_enabled
=
1074 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
1076 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
1077 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
1078 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
1079 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
1084 * single-step a target
1086 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
1087 int handle_breakpoints
)
1089 struct armv8_common
*armv8
= target_to_armv8(target
);
1090 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1091 int saved_retval
= ERROR_OK
;
1095 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_STEP
;
1097 if (target
->state
!= TARGET_HALTED
) {
1098 LOG_WARNING("target not halted");
1099 return ERROR_TARGET_NOT_HALTED
;
1102 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1103 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
1104 /* make sure EDECR.SS is not set when restoring the register */
1106 if (retval
== ERROR_OK
) {
1108 /* set EDECR.SS to enter hardware step mode */
1109 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1110 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (edecr
|0x4));
1112 /* disable interrupts while stepping */
1113 if (retval
== ERROR_OK
&& aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
)
1114 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0x3 << 22);
1115 /* bail out if stepping setup has failed */
1116 if (retval
!= ERROR_OK
)
1119 if (target
->smp
&& (current
== 1)) {
1121 * isolate current target so that it doesn't get resumed
1122 * together with the others
1124 retval
= arm_cti_gate_channel(armv8
->cti
, 1);
1125 /* resume all other targets in the group */
1126 if (retval
== ERROR_OK
)
1127 retval
= aarch64_step_restart_smp(target
);
1128 if (retval
!= ERROR_OK
) {
1129 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1132 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1135 /* all other targets running, restore and restart the current target */
1136 retval
= aarch64_restore_one(target
, current
, &address
, 0, 0);
1137 if (retval
== ERROR_OK
)
1138 retval
= aarch64_restart_one(target
, RESTART_LAZY
);
1140 if (retval
!= ERROR_OK
)
1143 LOG_DEBUG("target step-resumed at 0x%" PRIx64
, address
);
1144 if (!handle_breakpoints
)
1145 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1147 int64_t then
= timeval_ms();
1152 retval
= aarch64_check_state_one(target
,
1153 PRSR_SDR
|PRSR_HALT
, PRSR_SDR
|PRSR_HALT
, &stepped
, &prsr
);
1154 if (retval
!= ERROR_OK
|| stepped
)
1157 if (timeval_ms() > then
+ 100) {
1158 LOG_ERROR("timeout waiting for target %s halt after step",
1159 target_name(target
));
1160 retval
= ERROR_TARGET_TIMEOUT
;
1166 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1167 * causes a timeout. The core takes the step but doesn't complete it and so
1168 * debug state is never entered. However, you can manually halt the core
1169 * as an external debug even is also a WFI wakeup event.
1171 if (retval
== ERROR_TARGET_TIMEOUT
)
1172 saved_retval
= aarch64_halt_one(target
, HALT_SYNC
);
1175 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1176 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
1177 if (retval
!= ERROR_OK
)
1180 /* restore interrupts */
1181 if (aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
) {
1182 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0);
1183 if (retval
!= ERROR_OK
)
1187 if (saved_retval
!= ERROR_OK
)
1188 return saved_retval
;
1193 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
1195 struct armv8_common
*armv8
= target_to_armv8(target
);
1196 struct arm
*arm
= &armv8
->arm
;
1200 LOG_DEBUG("%s", target_name(target
));
1202 if (armv8
->pre_restore_context
)
1203 armv8
->pre_restore_context(target
);
1205 retval
= armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
1206 if (retval
== ERROR_OK
) {
1207 /* registers are now invalid */
1208 register_cache_invalidate(arm
->core_cache
);
1209 register_cache_invalidate(arm
->core_cache
->next
);
1216 * Cortex-A8 Breakpoint and watchpoint functions
1219 /* Setup hardware Breakpoint Register Pair */
1220 static int aarch64_set_breakpoint(struct target
*target
,
1221 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1226 uint8_t byte_addr_select
= 0x0F;
1227 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1228 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1229 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1231 if (breakpoint
->is_set
) {
1232 LOG_WARNING("breakpoint already set");
1236 if (breakpoint
->type
== BKPT_HARD
) {
1238 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
1240 if (brp_i
>= aarch64
->brp_num
) {
1241 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1242 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1244 breakpoint_hw_set(breakpoint
, brp_i
);
1245 if (breakpoint
->length
== 2)
1246 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1247 control
= ((matchmode
& 0x7) << 20)
1249 | (byte_addr_select
<< 5)
1251 brp_list
[brp_i
].used
= 1;
1252 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFCULL
;
1253 brp_list
[brp_i
].control
= control
;
1254 bpt_value
= brp_list
[brp_i
].value
;
1256 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1257 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].brpn
,
1258 (uint32_t)(bpt_value
& 0xFFFFFFFF));
1259 if (retval
!= ERROR_OK
)
1261 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1262 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].brpn
,
1263 (uint32_t)(bpt_value
>> 32));
1264 if (retval
!= ERROR_OK
)
1267 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1268 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].brpn
,
1269 brp_list
[brp_i
].control
);
1270 if (retval
!= ERROR_OK
)
1272 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1273 brp_list
[brp_i
].control
,
1274 brp_list
[brp_i
].value
);
1276 } else if (breakpoint
->type
== BKPT_SOFT
) {
1280 if (armv8_dpm_get_core_state(&armv8
->dpm
) == ARM_STATE_AARCH64
) {
1281 opcode
= ARMV8_HLT(11);
1283 if (breakpoint
->length
!= 4)
1284 LOG_ERROR("bug: breakpoint length should be 4 in AArch64 mode");
1287 * core_state is ARM_STATE_ARM
1288 * in that case the opcode depends on breakpoint length:
1289 * - if length == 4 => A32 opcode
1290 * - if length == 2 => T32 opcode
1291 * - if length == 3 => T32 opcode (refer to gdb doc : ARM-Breakpoint-Kinds)
1292 * in that case the length should be changed from 3 to 4 bytes
1294 opcode
= (breakpoint
->length
== 4) ? ARMV8_HLT_A1(11) :
1295 (uint32_t) (ARMV8_HLT_T1(11) | ARMV8_HLT_T1(11) << 16);
1297 if (breakpoint
->length
== 3)
1298 breakpoint
->length
= 4;
1301 buf_set_u32(code
, 0, 32, opcode
);
1303 retval
= target_read_memory(target
,
1304 breakpoint
->address
& 0xFFFFFFFFFFFFFFFEULL
,
1305 breakpoint
->length
, 1,
1306 breakpoint
->orig_instr
);
1307 if (retval
!= ERROR_OK
)
1310 armv8_cache_d_inner_flush_virt(armv8
,
1311 breakpoint
->address
& 0xFFFFFFFFFFFFFFFEULL
,
1312 breakpoint
->length
);
1314 retval
= target_write_memory(target
,
1315 breakpoint
->address
& 0xFFFFFFFFFFFFFFFEULL
,
1316 breakpoint
->length
, 1, code
);
1317 if (retval
!= ERROR_OK
)
1320 armv8_cache_d_inner_flush_virt(armv8
,
1321 breakpoint
->address
& 0xFFFFFFFFFFFFFFFEULL
,
1322 breakpoint
->length
);
1324 armv8_cache_i_inner_inval_virt(armv8
,
1325 breakpoint
->address
& 0xFFFFFFFFFFFFFFFEULL
,
1326 breakpoint
->length
);
1328 breakpoint
->is_set
= true;
1331 /* Ensure that halting debug mode is enable */
1332 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
1333 if (retval
!= ERROR_OK
) {
1334 LOG_DEBUG("Failed to set DSCR.HDE");
1341 static int aarch64_set_context_breakpoint(struct target
*target
,
1342 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1344 int retval
= ERROR_FAIL
;
1347 uint8_t byte_addr_select
= 0x0F;
1348 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1349 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1350 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1352 if (breakpoint
->is_set
) {
1353 LOG_WARNING("breakpoint already set");
1356 /*check available context BRPs*/
1357 while ((brp_list
[brp_i
].used
||
1358 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
1361 if (brp_i
>= aarch64
->brp_num
) {
1362 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1366 breakpoint_hw_set(breakpoint
, brp_i
);
1367 control
= ((matchmode
& 0x7) << 20)
1369 | (byte_addr_select
<< 5)
1371 brp_list
[brp_i
].used
= 1;
1372 brp_list
[brp_i
].value
= (breakpoint
->asid
);
1373 brp_list
[brp_i
].control
= control
;
1374 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1375 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].brpn
,
1376 brp_list
[brp_i
].value
);
1377 if (retval
!= ERROR_OK
)
1379 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1380 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].brpn
,
1381 brp_list
[brp_i
].control
);
1382 if (retval
!= ERROR_OK
)
1384 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1385 brp_list
[brp_i
].control
,
1386 brp_list
[brp_i
].value
);
1391 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1393 int retval
= ERROR_FAIL
;
1394 int brp_1
= 0; /* holds the contextID pair */
1395 int brp_2
= 0; /* holds the IVA pair */
1396 uint32_t control_ctx
, control_iva
;
1397 uint8_t ctx_byte_addr_select
= 0x0F;
1398 uint8_t iva_byte_addr_select
= 0x0F;
1399 uint8_t ctx_machmode
= 0x03;
1400 uint8_t iva_machmode
= 0x01;
1401 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1402 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1403 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1405 if (breakpoint
->is_set
) {
1406 LOG_WARNING("breakpoint already set");
1409 /*check available context BRPs*/
1410 while ((brp_list
[brp_1
].used
||
1411 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
1414 LOG_DEBUG("brp(CTX) found num: %d", brp_1
);
1415 if (brp_1
>= aarch64
->brp_num
) {
1416 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1420 while ((brp_list
[brp_2
].used
||
1421 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
1424 LOG_DEBUG("brp(IVA) found num: %d", brp_2
);
1425 if (brp_2
>= aarch64
->brp_num
) {
1426 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1430 breakpoint_hw_set(breakpoint
, brp_1
);
1431 breakpoint
->linked_brp
= brp_2
;
1432 control_ctx
= ((ctx_machmode
& 0x7) << 20)
1435 | (ctx_byte_addr_select
<< 5)
1437 brp_list
[brp_1
].used
= 1;
1438 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1439 brp_list
[brp_1
].control
= control_ctx
;
1440 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1441 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].brpn
,
1442 brp_list
[brp_1
].value
);
1443 if (retval
!= ERROR_OK
)
1445 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1446 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].brpn
,
1447 brp_list
[brp_1
].control
);
1448 if (retval
!= ERROR_OK
)
1451 control_iva
= ((iva_machmode
& 0x7) << 20)
1454 | (iva_byte_addr_select
<< 5)
1456 brp_list
[brp_2
].used
= 1;
1457 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFCULL
;
1458 brp_list
[brp_2
].control
= control_iva
;
1459 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1460 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].brpn
,
1461 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1462 if (retval
!= ERROR_OK
)
1464 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1465 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].brpn
,
1466 brp_list
[brp_2
].value
>> 32);
1467 if (retval
!= ERROR_OK
)
1469 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1470 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].brpn
,
1471 brp_list
[brp_2
].control
);
1472 if (retval
!= ERROR_OK
)
1478 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1481 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1482 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1483 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1485 if (!breakpoint
->is_set
) {
1486 LOG_WARNING("breakpoint not set");
1490 if (breakpoint
->type
== BKPT_HARD
) {
1491 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1492 int brp_i
= breakpoint
->number
;
1493 int brp_j
= breakpoint
->linked_brp
;
1494 if (brp_i
>= aarch64
->brp_num
) {
1495 LOG_DEBUG("Invalid BRP number in breakpoint");
1498 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1499 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1500 brp_list
[brp_i
].used
= 0;
1501 brp_list
[brp_i
].value
= 0;
1502 brp_list
[brp_i
].control
= 0;
1503 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1504 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].brpn
,
1505 brp_list
[brp_i
].control
);
1506 if (retval
!= ERROR_OK
)
1508 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1509 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].brpn
,
1510 (uint32_t)brp_list
[brp_i
].value
);
1511 if (retval
!= ERROR_OK
)
1513 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1514 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].brpn
,
1515 (uint32_t)brp_list
[brp_i
].value
);
1516 if (retval
!= ERROR_OK
)
1518 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1519 LOG_DEBUG("Invalid BRP number in breakpoint");
1522 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1523 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1524 brp_list
[brp_j
].used
= 0;
1525 brp_list
[brp_j
].value
= 0;
1526 brp_list
[brp_j
].control
= 0;
1527 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1528 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_j
].brpn
,
1529 brp_list
[brp_j
].control
);
1530 if (retval
!= ERROR_OK
)
1532 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1533 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_j
].brpn
,
1534 (uint32_t)brp_list
[brp_j
].value
);
1535 if (retval
!= ERROR_OK
)
1537 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1538 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_j
].brpn
,
1539 (uint32_t)brp_list
[brp_j
].value
);
1540 if (retval
!= ERROR_OK
)
1543 breakpoint
->linked_brp
= 0;
1544 breakpoint
->is_set
= false;
1548 int brp_i
= breakpoint
->number
;
1549 if (brp_i
>= aarch64
->brp_num
) {
1550 LOG_DEBUG("Invalid BRP number in breakpoint");
1553 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1554 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1555 brp_list
[brp_i
].used
= 0;
1556 brp_list
[brp_i
].value
= 0;
1557 brp_list
[brp_i
].control
= 0;
1558 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1559 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].brpn
,
1560 brp_list
[brp_i
].control
);
1561 if (retval
!= ERROR_OK
)
1563 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1564 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].brpn
,
1565 brp_list
[brp_i
].value
);
1566 if (retval
!= ERROR_OK
)
1569 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1570 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].brpn
,
1571 (uint32_t)brp_list
[brp_i
].value
);
1572 if (retval
!= ERROR_OK
)
1574 breakpoint
->is_set
= false;
1578 /* restore original instruction (kept in target endianness) */
1580 armv8_cache_d_inner_flush_virt(armv8
,
1581 breakpoint
->address
& 0xFFFFFFFFFFFFFFFEULL
,
1582 breakpoint
->length
);
1584 if (breakpoint
->length
== 4) {
1585 retval
= target_write_memory(target
,
1586 breakpoint
->address
& 0xFFFFFFFFFFFFFFFEULL
,
1587 4, 1, breakpoint
->orig_instr
);
1588 if (retval
!= ERROR_OK
)
1591 retval
= target_write_memory(target
,
1592 breakpoint
->address
& 0xFFFFFFFFFFFFFFFEULL
,
1593 2, 1, breakpoint
->orig_instr
);
1594 if (retval
!= ERROR_OK
)
1598 armv8_cache_d_inner_flush_virt(armv8
,
1599 breakpoint
->address
& 0xFFFFFFFFFFFFFFFEULL
,
1600 breakpoint
->length
);
1602 armv8_cache_i_inner_inval_virt(armv8
,
1603 breakpoint
->address
& 0xFFFFFFFFFFFFFFFEULL
,
1604 breakpoint
->length
);
1606 breakpoint
->is_set
= false;
1611 static int aarch64_add_breakpoint(struct target
*target
,
1612 struct breakpoint
*breakpoint
)
1614 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1616 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1617 LOG_INFO("no hardware breakpoint available");
1618 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1621 if (breakpoint
->type
== BKPT_HARD
)
1622 aarch64
->brp_num_available
--;
1624 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1627 static int aarch64_add_context_breakpoint(struct target
*target
,
1628 struct breakpoint
*breakpoint
)
1630 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1632 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1633 LOG_INFO("no hardware breakpoint available");
1634 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1637 if (breakpoint
->type
== BKPT_HARD
)
1638 aarch64
->brp_num_available
--;
1640 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1643 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1644 struct breakpoint
*breakpoint
)
1646 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1648 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1649 LOG_INFO("no hardware breakpoint available");
1650 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1653 if (breakpoint
->type
== BKPT_HARD
)
1654 aarch64
->brp_num_available
--;
1656 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1659 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1661 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1664 /* It is perfectly possible to remove breakpoints while the target is running */
1665 if (target
->state
!= TARGET_HALTED
) {
1666 LOG_WARNING("target not halted");
1667 return ERROR_TARGET_NOT_HALTED
;
1671 if (breakpoint
->is_set
) {
1672 aarch64_unset_breakpoint(target
, breakpoint
);
1673 if (breakpoint
->type
== BKPT_HARD
)
1674 aarch64
->brp_num_available
++;
1680 /* Setup hardware Watchpoint Register Pair */
1681 static int aarch64_set_watchpoint(struct target
*target
,
1682 struct watchpoint
*watchpoint
)
1686 uint32_t control
, offset
, length
;
1687 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1688 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1689 struct aarch64_brp
*wp_list
= aarch64
->wp_list
;
1691 if (watchpoint
->is_set
) {
1692 LOG_WARNING("watchpoint already set");
1696 while (wp_list
[wp_i
].used
&& (wp_i
< aarch64
->wp_num
))
1698 if (wp_i
>= aarch64
->wp_num
) {
1699 LOG_ERROR("ERROR Can not find free Watchpoint Register Pair");
1700 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1703 control
= (1 << 0) /* enable */
1704 | (3 << 1) /* both user and privileged access */
1705 | (1 << 13); /* higher mode control */
1707 switch (watchpoint
->rw
) {
1719 /* Match up to 8 bytes. */
1720 offset
= watchpoint
->address
& 7;
1721 length
= watchpoint
->length
;
1722 if (offset
+ length
> sizeof(uint64_t)) {
1723 length
= sizeof(uint64_t) - offset
;
1724 LOG_WARNING("Adjust watchpoint match inside 8-byte boundary");
1726 for (; length
> 0; offset
++, length
--)
1727 control
|= (1 << offset
) << 5;
1729 wp_list
[wp_i
].value
= watchpoint
->address
& 0xFFFFFFFFFFFFFFF8ULL
;
1730 wp_list
[wp_i
].control
= control
;
1732 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1733 + CPUV8_DBG_WVR_BASE
+ 16 * wp_list
[wp_i
].brpn
,
1734 (uint32_t)(wp_list
[wp_i
].value
& 0xFFFFFFFF));
1735 if (retval
!= ERROR_OK
)
1737 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1738 + CPUV8_DBG_WVR_BASE
+ 4 + 16 * wp_list
[wp_i
].brpn
,
1739 (uint32_t)(wp_list
[wp_i
].value
>> 32));
1740 if (retval
!= ERROR_OK
)
1743 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1744 + CPUV8_DBG_WCR_BASE
+ 16 * wp_list
[wp_i
].brpn
,
1746 if (retval
!= ERROR_OK
)
1748 LOG_DEBUG("wp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, wp_i
,
1749 wp_list
[wp_i
].control
, wp_list
[wp_i
].value
);
1751 /* Ensure that halting debug mode is enable */
1752 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
1753 if (retval
!= ERROR_OK
) {
1754 LOG_DEBUG("Failed to set DSCR.HDE");
1758 wp_list
[wp_i
].used
= 1;
1759 watchpoint_set(watchpoint
, wp_i
);
1764 /* Clear hardware Watchpoint Register Pair */
1765 static int aarch64_unset_watchpoint(struct target
*target
,
1766 struct watchpoint
*watchpoint
)
1769 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1770 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1771 struct aarch64_brp
*wp_list
= aarch64
->wp_list
;
1773 if (!watchpoint
->is_set
) {
1774 LOG_WARNING("watchpoint not set");
1778 int wp_i
= watchpoint
->number
;
1779 if (wp_i
>= aarch64
->wp_num
) {
1780 LOG_DEBUG("Invalid WP number in watchpoint");
1783 LOG_DEBUG("rwp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, wp_i
,
1784 wp_list
[wp_i
].control
, wp_list
[wp_i
].value
);
1785 wp_list
[wp_i
].used
= 0;
1786 wp_list
[wp_i
].value
= 0;
1787 wp_list
[wp_i
].control
= 0;
1788 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1789 + CPUV8_DBG_WCR_BASE
+ 16 * wp_list
[wp_i
].brpn
,
1790 wp_list
[wp_i
].control
);
1791 if (retval
!= ERROR_OK
)
1793 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1794 + CPUV8_DBG_WVR_BASE
+ 16 * wp_list
[wp_i
].brpn
,
1795 wp_list
[wp_i
].value
);
1796 if (retval
!= ERROR_OK
)
1799 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1800 + CPUV8_DBG_WVR_BASE
+ 4 + 16 * wp_list
[wp_i
].brpn
,
1801 (uint32_t)wp_list
[wp_i
].value
);
1802 if (retval
!= ERROR_OK
)
1804 watchpoint
->is_set
= false;
1809 static int aarch64_add_watchpoint(struct target
*target
,
1810 struct watchpoint
*watchpoint
)
1813 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1815 if (aarch64
->wp_num_available
< 1) {
1816 LOG_INFO("no hardware watchpoint available");
1817 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1820 retval
= aarch64_set_watchpoint(target
, watchpoint
);
1821 if (retval
== ERROR_OK
)
1822 aarch64
->wp_num_available
--;
1827 static int aarch64_remove_watchpoint(struct target
*target
,
1828 struct watchpoint
*watchpoint
)
1830 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1832 if (watchpoint
->is_set
) {
1833 aarch64_unset_watchpoint(target
, watchpoint
);
1834 aarch64
->wp_num_available
++;
1841 * find out which watchpoint hits
1842 * get exception address and compare the address to watchpoints
1844 static int aarch64_hit_watchpoint(struct target
*target
,
1845 struct watchpoint
**hit_watchpoint
)
1847 if (target
->debug_reason
!= DBG_REASON_WATCHPOINT
)
1850 struct armv8_common
*armv8
= target_to_armv8(target
);
1852 target_addr_t exception_address
;
1853 struct watchpoint
*wp
;
1855 exception_address
= armv8
->dpm
.wp_addr
;
1857 if (exception_address
== 0xFFFFFFFF)
1860 for (wp
= target
->watchpoints
; wp
; wp
= wp
->next
)
1861 if (exception_address
>= wp
->address
&& exception_address
< (wp
->address
+ wp
->length
)) {
1862 *hit_watchpoint
= wp
;
1870 * Cortex-A8 Reset functions
1873 static int aarch64_enable_reset_catch(struct target
*target
, bool enable
)
1875 struct armv8_common
*armv8
= target_to_armv8(target
);
1879 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1880 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
1881 LOG_DEBUG("EDECR = 0x%08" PRIx32
", enable=%d", edecr
, enable
);
1882 if (retval
!= ERROR_OK
)
1890 return mem_ap_write_atomic_u32(armv8
->debug_ap
,
1891 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
1894 static int aarch64_clear_reset_catch(struct target
*target
)
1896 struct armv8_common
*armv8
= target_to_armv8(target
);
1901 /* check if Reset Catch debug event triggered as expected */
1902 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1903 armv8
->debug_base
+ CPUV8_DBG_EDESR
, &edesr
);
1904 if (retval
!= ERROR_OK
)
1907 was_triggered
= !!(edesr
& ESR_RC
);
1908 LOG_DEBUG("Reset Catch debug event %s",
1909 was_triggered
? "triggered" : "NOT triggered!");
1911 if (was_triggered
) {
1912 /* clear pending Reset Catch debug event */
1914 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1915 armv8
->debug_base
+ CPUV8_DBG_EDESR
, edesr
);
1916 if (retval
!= ERROR_OK
)
1923 static int aarch64_assert_reset(struct target
*target
)
1925 struct armv8_common
*armv8
= target_to_armv8(target
);
1926 enum reset_types reset_config
= jtag_get_reset_config();
1931 /* Issue some kind of warm reset. */
1932 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1933 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1934 else if (reset_config
& RESET_HAS_SRST
) {
1935 bool srst_asserted
= false;
1937 if (target
->reset_halt
&& !(reset_config
& RESET_SRST_PULLS_TRST
)) {
1938 if (target_was_examined(target
)) {
1940 if (reset_config
& RESET_SRST_NO_GATING
) {
1942 * SRST needs to be asserted *before* Reset Catch
1943 * debug event can be set up.
1945 adapter_assert_reset();
1946 srst_asserted
= true;
1949 /* make sure to clear all sticky errors */
1950 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1951 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
1953 /* set up Reset Catch debug event to halt the CPU after reset */
1954 retval
= aarch64_enable_reset_catch(target
, true);
1955 if (retval
!= ERROR_OK
)
1956 LOG_WARNING("%s: Error enabling Reset Catch debug event; the CPU will not halt immediately after reset!",
1957 target_name(target
));
1959 LOG_WARNING("%s: Target not examined, will not halt immediately after reset!",
1960 target_name(target
));
1964 /* REVISIT handle "pulls" cases, if there's
1965 * hardware that needs them to work.
1968 adapter_assert_reset();
1970 LOG_ERROR("%s: how to reset?", target_name(target
));
1974 /* registers are now invalid */
1975 if (target_was_examined(target
)) {
1976 register_cache_invalidate(armv8
->arm
.core_cache
);
1977 register_cache_invalidate(armv8
->arm
.core_cache
->next
);
1980 target
->state
= TARGET_RESET
;
1985 static int aarch64_deassert_reset(struct target
*target
)
1991 /* be certain SRST is off */
1992 adapter_deassert_reset();
1994 if (!target_was_examined(target
))
1997 retval
= aarch64_init_debug_access(target
);
1998 if (retval
!= ERROR_OK
)
2001 retval
= aarch64_poll(target
);
2002 if (retval
!= ERROR_OK
)
2005 if (target
->reset_halt
) {
2006 /* clear pending Reset Catch debug event */
2007 retval
= aarch64_clear_reset_catch(target
);
2008 if (retval
!= ERROR_OK
)
2009 LOG_WARNING("%s: Clearing Reset Catch debug event failed",
2010 target_name(target
));
2012 /* disable Reset Catch debug event */
2013 retval
= aarch64_enable_reset_catch(target
, false);
2014 if (retval
!= ERROR_OK
)
2015 LOG_WARNING("%s: Disabling Reset Catch debug event failed",
2016 target_name(target
));
2018 if (target
->state
!= TARGET_HALTED
) {
2019 LOG_WARNING("%s: ran after reset and before halt ...",
2020 target_name(target
));
2021 if (target_was_examined(target
)) {
2022 retval
= aarch64_halt_one(target
, HALT_LAZY
);
2023 if (retval
!= ERROR_OK
)
2026 target
->state
= TARGET_UNKNOWN
;
2034 static int aarch64_write_cpu_memory_slow(struct target
*target
,
2035 uint32_t size
, uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
2037 struct armv8_common
*armv8
= target_to_armv8(target
);
2038 struct arm_dpm
*dpm
= &armv8
->dpm
;
2039 struct arm
*arm
= &armv8
->arm
;
2042 armv8_reg_current(arm
, 1)->dirty
= true;
2044 /* change DCC to normal mode if necessary */
2045 if (*dscr
& DSCR_MA
) {
2047 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2048 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2049 if (retval
!= ERROR_OK
)
2054 uint32_t data
, opcode
;
2056 /* write the data to store into DTRRX */
2060 data
= target_buffer_get_u16(target
, buffer
);
2062 data
= target_buffer_get_u32(target
, buffer
);
2063 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2064 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
2065 if (retval
!= ERROR_OK
)
2068 if (arm
->core_state
== ARM_STATE_AARCH64
)
2069 retval
= dpm
->instr_execute(dpm
, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0
, 1));
2071 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
2072 if (retval
!= ERROR_OK
)
2076 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRB_IP
);
2078 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRH_IP
);
2080 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRW_IP
);
2081 retval
= dpm
->instr_execute(dpm
, opcode
);
2082 if (retval
!= ERROR_OK
)
2093 static int aarch64_write_cpu_memory_fast(struct target
*target
,
2094 uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
2096 struct armv8_common
*armv8
= target_to_armv8(target
);
2097 struct arm
*arm
= &armv8
->arm
;
2100 armv8_reg_current(arm
, 1)->dirty
= true;
2102 /* Step 1.d - Change DCC to memory mode */
2104 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2105 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2106 if (retval
!= ERROR_OK
)
2110 /* Step 2.a - Do the write */
2111 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
2112 buffer
, 4, count
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
2113 if (retval
!= ERROR_OK
)
2116 /* Step 3.a - Switch DTR mode back to Normal mode */
2118 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2119 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2120 if (retval
!= ERROR_OK
)
2126 static int aarch64_write_cpu_memory(struct target
*target
,
2127 uint64_t address
, uint32_t size
,
2128 uint32_t count
, const uint8_t *buffer
)
2130 /* write memory through APB-AP */
2131 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2132 struct armv8_common
*armv8
= target_to_armv8(target
);
2133 struct arm_dpm
*dpm
= &armv8
->dpm
;
2134 struct arm
*arm
= &armv8
->arm
;
2137 if (target
->state
!= TARGET_HALTED
) {
2138 LOG_WARNING("target not halted");
2139 return ERROR_TARGET_NOT_HALTED
;
2142 /* Mark register X0 as dirty, as it will be used
2143 * for transferring the data.
2144 * It will be restored automatically when exiting
2147 armv8_reg_current(arm
, 0)->dirty
= true;
2149 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2152 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2153 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2154 if (retval
!= ERROR_OK
)
2157 /* Set Normal access mode */
2158 dscr
= (dscr
& ~DSCR_MA
);
2159 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2160 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2161 if (retval
!= ERROR_OK
)
2164 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2165 /* Write X0 with value 'address' using write procedure */
2166 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2167 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2168 retval
= dpm
->instr_write_data_dcc_64(dpm
,
2169 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
2171 /* Write R0 with value 'address' using write procedure */
2172 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
2173 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2174 retval
= dpm
->instr_write_data_dcc(dpm
,
2175 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
2178 if (retval
!= ERROR_OK
)
2181 if (size
== 4 && (address
% 4) == 0)
2182 retval
= aarch64_write_cpu_memory_fast(target
, count
, buffer
, &dscr
);
2184 retval
= aarch64_write_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
2186 if (retval
!= ERROR_OK
) {
2187 /* Unset DTR mode */
2188 mem_ap_read_atomic_u32(armv8
->debug_ap
,
2189 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2191 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2192 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2195 /* Check for sticky abort flags in the DSCR */
2196 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2197 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2198 if (retval
!= ERROR_OK
)
2202 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
2203 /* Abort occurred - clear it and exit */
2204 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
2205 armv8_dpm_handle_exception(dpm
, true);
2213 static int aarch64_read_cpu_memory_slow(struct target
*target
,
2214 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
2216 struct armv8_common
*armv8
= target_to_armv8(target
);
2217 struct arm_dpm
*dpm
= &armv8
->dpm
;
2218 struct arm
*arm
= &armv8
->arm
;
2221 armv8_reg_current(arm
, 1)->dirty
= true;
2223 /* change DCC to normal mode (if necessary) */
2224 if (*dscr
& DSCR_MA
) {
2226 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2227 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2228 if (retval
!= ERROR_OK
)
2233 uint32_t opcode
, data
;
2236 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRB_IP
);
2238 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRH_IP
);
2240 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRW_IP
);
2241 retval
= dpm
->instr_execute(dpm
, opcode
);
2242 if (retval
!= ERROR_OK
)
2245 if (arm
->core_state
== ARM_STATE_AARCH64
)
2246 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0
, 1));
2248 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
2249 if (retval
!= ERROR_OK
)
2252 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2253 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &data
);
2254 if (retval
!= ERROR_OK
)
2258 *buffer
= (uint8_t)data
;
2260 target_buffer_set_u16(target
, buffer
, (uint16_t)data
);
2262 target_buffer_set_u32(target
, buffer
, data
);
2272 static int aarch64_read_cpu_memory_fast(struct target
*target
,
2273 uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
2275 struct armv8_common
*armv8
= target_to_armv8(target
);
2276 struct arm_dpm
*dpm
= &armv8
->dpm
;
2277 struct arm
*arm
= &armv8
->arm
;
2281 /* Mark X1 as dirty */
2282 armv8_reg_current(arm
, 1)->dirty
= true;
2284 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2285 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2286 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0));
2288 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2289 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
2292 if (retval
!= ERROR_OK
)
2295 /* Step 1.e - Change DCC to memory mode */
2297 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2298 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2299 if (retval
!= ERROR_OK
)
2302 /* Step 1.f - read DBGDTRTX and discard the value */
2303 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2304 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2305 if (retval
!= ERROR_OK
)
2309 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
2310 * Abort flags are sticky, so can be read at end of transactions
2312 * This data is read in aligned to 32 bit boundary.
2316 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
2317 * increments X0 by 4. */
2318 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, buffer
, 4, count
,
2319 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
2320 if (retval
!= ERROR_OK
)
2324 /* Step 3.a - set DTR access mode back to Normal mode */
2326 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2327 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2328 if (retval
!= ERROR_OK
)
2331 /* Step 3.b - read DBGDTRTX for the final value */
2332 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2333 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2334 if (retval
!= ERROR_OK
)
2337 target_buffer_set_u32(target
, buffer
+ count
* 4, value
);
2341 static int aarch64_read_cpu_memory(struct target
*target
,
2342 target_addr_t address
, uint32_t size
,
2343 uint32_t count
, uint8_t *buffer
)
2345 /* read memory through APB-AP */
2346 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2347 struct armv8_common
*armv8
= target_to_armv8(target
);
2348 struct arm_dpm
*dpm
= &armv8
->dpm
;
2349 struct arm
*arm
= &armv8
->arm
;
2352 LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64
" size %" PRIu32
" count %" PRIu32
,
2353 address
, size
, count
);
2355 if (target
->state
!= TARGET_HALTED
) {
2356 LOG_WARNING("target not halted");
2357 return ERROR_TARGET_NOT_HALTED
;
2360 /* Mark register X0 as dirty, as it will be used
2361 * for transferring the data.
2362 * It will be restored automatically when exiting
2365 armv8_reg_current(arm
, 0)->dirty
= true;
2368 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2369 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2370 if (retval
!= ERROR_OK
)
2373 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2375 /* Set Normal access mode */
2377 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2378 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2379 if (retval
!= ERROR_OK
)
2382 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2383 /* Write X0 with value 'address' using write procedure */
2384 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2385 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2386 retval
= dpm
->instr_write_data_dcc_64(dpm
,
2387 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
2389 /* Write R0 with value 'address' using write procedure */
2390 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2391 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2392 retval
= dpm
->instr_write_data_dcc(dpm
,
2393 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
2396 if (retval
!= ERROR_OK
)
2399 if (size
== 4 && (address
% 4) == 0)
2400 retval
= aarch64_read_cpu_memory_fast(target
, count
, buffer
, &dscr
);
2402 retval
= aarch64_read_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
2404 if (dscr
& DSCR_MA
) {
2406 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2407 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2410 if (retval
!= ERROR_OK
)
2413 /* Check for sticky abort flags in the DSCR */
2414 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2415 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2416 if (retval
!= ERROR_OK
)
2421 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
2422 /* Abort occurred - clear it and exit */
2423 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
2424 armv8_dpm_handle_exception(dpm
, true);
2432 static int aarch64_read_phys_memory(struct target
*target
,
2433 target_addr_t address
, uint32_t size
,
2434 uint32_t count
, uint8_t *buffer
)
2436 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2438 if (count
&& buffer
) {
2439 /* read memory through APB-AP */
2440 retval
= aarch64_mmu_modify(target
, 0);
2441 if (retval
!= ERROR_OK
)
2443 retval
= aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2448 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
2449 uint32_t size
, uint32_t count
, uint8_t *buffer
)
2451 int mmu_enabled
= 0;
2454 /* determine if MMU was enabled on target stop */
2455 retval
= aarch64_mmu(target
, &mmu_enabled
);
2456 if (retval
!= ERROR_OK
)
2460 /* enable MMU as we could have disabled it for phys access */
2461 retval
= aarch64_mmu_modify(target
, 1);
2462 if (retval
!= ERROR_OK
)
2465 return aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2468 static int aarch64_write_phys_memory(struct target
*target
,
2469 target_addr_t address
, uint32_t size
,
2470 uint32_t count
, const uint8_t *buffer
)
2472 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2474 if (count
&& buffer
) {
2475 /* write memory through APB-AP */
2476 retval
= aarch64_mmu_modify(target
, 0);
2477 if (retval
!= ERROR_OK
)
2479 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2485 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
2486 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
2488 int mmu_enabled
= 0;
2491 /* determine if MMU was enabled on target stop */
2492 retval
= aarch64_mmu(target
, &mmu_enabled
);
2493 if (retval
!= ERROR_OK
)
2497 /* enable MMU as we could have disabled it for phys access */
2498 retval
= aarch64_mmu_modify(target
, 1);
2499 if (retval
!= ERROR_OK
)
2502 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2505 static int aarch64_handle_target_request(void *priv
)
2507 struct target
*target
= priv
;
2508 struct armv8_common
*armv8
= target_to_armv8(target
);
2511 if (!target_was_examined(target
))
2513 if (!target
->dbg_msg_enabled
)
2516 if (target
->state
== TARGET_RUNNING
) {
2519 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2520 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2522 /* check if we have data */
2523 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
2524 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2525 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
2526 if (retval
== ERROR_OK
) {
2527 target_request(target
, request
);
2528 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2529 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2537 static int aarch64_examine_first(struct target
*target
)
2539 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2540 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2541 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2542 struct aarch64_private_config
*pc
= target
->private_config
;
2544 int retval
= ERROR_OK
;
2545 uint64_t debug
, ttypr
;
2547 uint32_t tmp0
, tmp1
, tmp2
, tmp3
;
2548 debug
= ttypr
= cpuid
= 0;
2553 if (!armv8
->debug_ap
) {
2554 if (pc
->adiv5_config
.ap_num
== DP_APSEL_INVALID
) {
2555 /* Search for the APB-AB */
2556 retval
= dap_find_get_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
2557 if (retval
!= ERROR_OK
) {
2558 LOG_ERROR("Could not find APB-AP for debug access");
2562 armv8
->debug_ap
= dap_get_ap(swjdp
, pc
->adiv5_config
.ap_num
);
2563 if (!armv8
->debug_ap
) {
2564 LOG_ERROR("Cannot get AP");
2570 retval
= mem_ap_init(armv8
->debug_ap
);
2571 if (retval
!= ERROR_OK
) {
2572 LOG_ERROR("Could not initialize the APB-AP");
2576 armv8
->debug_ap
->memaccess_tck
= 10;
2578 if (!target
->dbgbase_set
) {
2579 /* Lookup Processor DAP */
2580 retval
= dap_lookup_cs_component(armv8
->debug_ap
, ARM_CS_C9_DEVTYPE_CORE_DEBUG
,
2581 &armv8
->debug_base
, target
->coreid
);
2582 if (retval
!= ERROR_OK
)
2584 LOG_DEBUG("Detected core %" PRId32
" dbgbase: " TARGET_ADDR_FMT
,
2585 target
->coreid
, armv8
->debug_base
);
2587 armv8
->debug_base
= target
->dbgbase
;
2589 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2590 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
2591 if (retval
!= ERROR_OK
) {
2592 LOG_DEBUG("Examine %s failed", "oslock");
2596 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2597 armv8
->debug_base
+ CPUV8_DBG_MAINID0
, &cpuid
);
2598 if (retval
!= ERROR_OK
) {
2599 LOG_DEBUG("Examine %s failed", "CPUID");
2603 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2604 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
, &tmp0
);
2605 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2606 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
+ 4, &tmp1
);
2607 if (retval
!= ERROR_OK
) {
2608 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2611 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2612 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
, &tmp2
);
2613 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2614 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
+ 4, &tmp3
);
2615 if (retval
!= ERROR_OK
) {
2616 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2620 retval
= dap_run(armv8
->debug_ap
->dap
);
2621 if (retval
!= ERROR_OK
) {
2622 LOG_ERROR("%s: examination failed\n", target_name(target
));
2627 ttypr
= (ttypr
<< 32) | tmp0
;
2629 debug
= (debug
<< 32) | tmp2
;
2631 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
2632 LOG_DEBUG("ttypr = 0x%08" PRIx64
, ttypr
);
2633 LOG_DEBUG("debug = 0x%08" PRIx64
, debug
);
2636 LOG_TARGET_ERROR(target
, "CTI not specified");
2640 armv8
->cti
= pc
->cti
;
2642 retval
= aarch64_dpm_setup(aarch64
, debug
);
2643 if (retval
!= ERROR_OK
)
2646 /* Setup Breakpoint Register Pairs */
2647 aarch64
->brp_num
= (uint32_t)((debug
>> 12) & 0x0F) + 1;
2648 aarch64
->brp_num_context
= (uint32_t)((debug
>> 28) & 0x0F) + 1;
2649 aarch64
->brp_num_available
= aarch64
->brp_num
;
2650 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
2651 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
2652 aarch64
->brp_list
[i
].used
= 0;
2653 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
2654 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
2656 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
2657 aarch64
->brp_list
[i
].value
= 0;
2658 aarch64
->brp_list
[i
].control
= 0;
2659 aarch64
->brp_list
[i
].brpn
= i
;
2662 /* Setup Watchpoint Register Pairs */
2663 aarch64
->wp_num
= (uint32_t)((debug
>> 20) & 0x0F) + 1;
2664 aarch64
->wp_num_available
= aarch64
->wp_num
;
2665 aarch64
->wp_list
= calloc(aarch64
->wp_num
, sizeof(struct aarch64_brp
));
2666 for (i
= 0; i
< aarch64
->wp_num
; i
++) {
2667 aarch64
->wp_list
[i
].used
= 0;
2668 aarch64
->wp_list
[i
].type
= BRP_NORMAL
;
2669 aarch64
->wp_list
[i
].value
= 0;
2670 aarch64
->wp_list
[i
].control
= 0;
2671 aarch64
->wp_list
[i
].brpn
= i
;
2674 LOG_DEBUG("Configured %i hw breakpoints, %i watchpoints",
2675 aarch64
->brp_num
, aarch64
->wp_num
);
2677 target
->state
= TARGET_UNKNOWN
;
2678 target
->debug_reason
= DBG_REASON_NOTHALTED
;
2679 aarch64
->isrmasking_mode
= AARCH64_ISRMASK_ON
;
2680 target_set_examined(target
);
2684 static int aarch64_examine(struct target
*target
)
2686 int retval
= ERROR_OK
;
2688 /* don't re-probe hardware after each reset */
2689 if (!target_was_examined(target
))
2690 retval
= aarch64_examine_first(target
);
2692 /* Configure core debug access */
2693 if (retval
== ERROR_OK
)
2694 retval
= aarch64_init_debug_access(target
);
2700 * Cortex-A8 target creation and initialization
/**
 * Target init hook. Most setup happens in examine_first(); here we only
 * initialize semihosting support.
 *
 * @param cmd_ctx  command context (unused)
 * @param target   target being initialized
 * @return result of arm_semihosting_init()
 */
static int aarch64_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	return arm_semihosting_init(target);
}
2711 static int aarch64_init_arch_info(struct target
*target
,
2712 struct aarch64_common
*aarch64
, struct adiv5_dap
*dap
)
2714 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2716 /* Setup struct aarch64_common */
2717 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
2718 armv8
->arm
.dap
= dap
;
2720 /* register arch-specific functions */
2721 armv8
->examine_debug_reason
= NULL
;
2722 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
2723 armv8
->pre_restore_context
= NULL
;
2724 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
2726 armv8_init_arch_info(target
, armv8
);
2727 target_register_timer_callback(aarch64_handle_target_request
, 1,
2728 TARGET_TIMER_TYPE_PERIODIC
, target
);
2733 static int armv8r_target_create(struct target
*target
, Jim_Interp
*interp
)
2735 struct aarch64_private_config
*pc
= target
->private_config
;
2736 struct aarch64_common
*aarch64
;
2738 if (adiv5_verify_config(&pc
->adiv5_config
) != ERROR_OK
)
2741 aarch64
= calloc(1, sizeof(struct aarch64_common
));
2743 LOG_ERROR("Out of memory");
2747 aarch64
->armv8_common
.is_armv8r
= true;
2749 return aarch64_init_arch_info(target
, aarch64
, pc
->adiv5_config
.dap
);
2752 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
2754 struct aarch64_private_config
*pc
= target
->private_config
;
2755 struct aarch64_common
*aarch64
;
2757 if (adiv5_verify_config(&pc
->adiv5_config
) != ERROR_OK
)
2760 aarch64
= calloc(1, sizeof(struct aarch64_common
));
2762 LOG_ERROR("Out of memory");
2766 aarch64
->armv8_common
.is_armv8r
= false;
2768 return aarch64_init_arch_info(target
, aarch64
, pc
->adiv5_config
.dap
);
2771 static void aarch64_deinit_target(struct target
*target
)
2773 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2774 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2775 struct arm_dpm
*dpm
= &armv8
->dpm
;
2777 if (armv8
->debug_ap
)
2778 dap_put_ap(armv8
->debug_ap
);
2780 armv8_free_reg_cache(target
);
2781 free(aarch64
->brp_list
);
2784 free(target
->private_config
);
2788 static int aarch64_mmu(struct target
*target
, int *enabled
)
2790 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2791 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2792 if (target
->state
!= TARGET_HALTED
) {
2793 LOG_ERROR("%s: target %s not halted", __func__
, target_name(target
));
2794 return ERROR_TARGET_INVALID
;
2796 if (armv8
->is_armv8r
)
2799 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
2803 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
2804 target_addr_t
*phys
)
2806 return armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
/*
 * private target configuration items
 */
enum aarch64_cfg_param {
	CFG_CTI,	/* "-cti" option: cross-trigger interface instance */
};
2816 static const struct jim_nvp nvp_config_opts
[] = {
2817 { .name
= "-cti", .value
= CFG_CTI
},
2818 { .name
= NULL
, .value
= -1 }
2821 static int aarch64_jim_configure(struct target
*target
, struct jim_getopt_info
*goi
)
2823 struct aarch64_private_config
*pc
;
2827 pc
= (struct aarch64_private_config
*)target
->private_config
;
2829 pc
= calloc(1, sizeof(struct aarch64_private_config
));
2830 pc
->adiv5_config
.ap_num
= DP_APSEL_INVALID
;
2831 target
->private_config
= pc
;
2835 * Call adiv5_jim_configure() to parse the common DAP options
2836 * It will return JIM_CONTINUE if it didn't find any known
2837 * options, JIM_OK if it correctly parsed the topmost option
2838 * and JIM_ERR if an error occurred during parameter evaluation.
2839 * For JIM_CONTINUE, we check our own params.
2841 * adiv5_jim_configure() assumes 'private_config' to point to
2842 * 'struct adiv5_private_config'. Override 'private_config'!
2844 target
->private_config
= &pc
->adiv5_config
;
2845 e
= adiv5_jim_configure(target
, goi
);
2846 target
->private_config
= pc
;
2847 if (e
!= JIM_CONTINUE
)
2850 /* parse config or cget options ... */
2851 if (goi
->argc
> 0) {
2852 Jim_SetEmptyResult(goi
->interp
);
2854 /* check first if topmost item is for us */
2855 e
= jim_nvp_name2value_obj(goi
->interp
, nvp_config_opts
,
2858 return JIM_CONTINUE
;
2860 e
= jim_getopt_obj(goi
, NULL
);
2866 if (goi
->isconfigure
) {
2868 struct arm_cti
*cti
;
2869 e
= jim_getopt_obj(goi
, &o_cti
);
2872 cti
= cti_instance_by_jim_obj(goi
->interp
, o_cti
);
2874 Jim_SetResultString(goi
->interp
, "CTI name invalid!", -1);
2879 if (goi
->argc
!= 0) {
2880 Jim_WrongNumArgs(goi
->interp
,
2881 goi
->argc
, goi
->argv
,
2886 if (!pc
|| !pc
->cti
) {
2887 Jim_SetResultString(goi
->interp
, "CTI not configured", -1);
2890 Jim_SetResultString(goi
->interp
, arm_cti_name(pc
->cti
), -1);
2896 return JIM_CONTINUE
;
2903 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
2905 struct target
*target
= get_current_target(CMD_CTX
);
2906 struct armv8_common
*armv8
= target_to_armv8(target
);
2908 return armv8_handle_cache_info_command(CMD
,
2909 &armv8
->armv8_mmu
.armv8_cache
);
2912 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
2914 struct target
*target
= get_current_target(CMD_CTX
);
2915 if (!target_was_examined(target
)) {
2916 LOG_ERROR("target not examined yet");
2920 return aarch64_init_debug_access(target
);
2923 COMMAND_HANDLER(aarch64_handle_disassemble_command
)
2925 struct target
*target
= get_current_target(CMD_CTX
);
2928 LOG_ERROR("No target selected");
2932 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2934 if (aarch64
->common_magic
!= AARCH64_COMMON_MAGIC
) {
2935 command_print(CMD
, "current target isn't an AArch64");
2940 target_addr_t address
;
2944 COMMAND_PARSE_NUMBER(int, CMD_ARGV
[1], count
);
2947 COMMAND_PARSE_ADDRESS(CMD_ARGV
[0], address
);
2950 return ERROR_COMMAND_SYNTAX_ERROR
;
2953 return a64_disassemble(CMD
, target
, address
, count
);
2956 COMMAND_HANDLER(aarch64_mask_interrupts_command
)
2958 struct target
*target
= get_current_target(CMD_CTX
);
2959 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2961 static const struct nvp nvp_maskisr_modes
[] = {
2962 { .name
= "off", .value
= AARCH64_ISRMASK_OFF
},
2963 { .name
= "on", .value
= AARCH64_ISRMASK_ON
},
2964 { .name
= NULL
, .value
= -1 },
2966 const struct nvp
*n
;
2969 n
= nvp_name2value(nvp_maskisr_modes
, CMD_ARGV
[0]);
2971 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV
[0]);
2972 return ERROR_COMMAND_SYNTAX_ERROR
;
2975 aarch64
->isrmasking_mode
= n
->value
;
2978 n
= nvp_value2name(nvp_maskisr_modes
, aarch64
->isrmasking_mode
);
2979 command_print(CMD
, "aarch64 interrupt mask %s", n
->name
);
2984 COMMAND_HANDLER(aarch64_mcrmrc_command
)
2986 bool is_mcr
= false;
2987 unsigned int arg_cnt
= 5;
2989 if (!strcmp(CMD_NAME
, "mcr")) {
2994 if (arg_cnt
!= CMD_ARGC
)
2995 return ERROR_COMMAND_SYNTAX_ERROR
;
2997 struct target
*target
= get_current_target(CMD_CTX
);
2999 command_print(CMD
, "no current target");
3002 if (!target_was_examined(target
)) {
3003 command_print(CMD
, "%s: not yet examined", target_name(target
));
3004 return ERROR_TARGET_NOT_EXAMINED
;
3007 struct arm
*arm
= target_to_arm(target
);
3009 command_print(CMD
, "%s: not an ARM", target_name(target
));
3013 if (target
->state
!= TARGET_HALTED
)
3014 return ERROR_TARGET_NOT_HALTED
;
3016 if (arm
->core_state
== ARM_STATE_AARCH64
) {
3017 command_print(CMD
, "%s: not 32-bit arm target", target_name(target
));
3028 /* NOTE: parameter sequence matches ARM instruction set usage:
3029 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
3030 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
3031 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
3033 COMMAND_PARSE_NUMBER(int, CMD_ARGV
[0], cpnum
);
3035 command_print(CMD
, "coprocessor %d out of range", cpnum
);
3036 return ERROR_COMMAND_ARGUMENT_INVALID
;
3039 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[1], op1
);
3041 command_print(CMD
, "op1 %d out of range", op1
);
3042 return ERROR_COMMAND_ARGUMENT_INVALID
;
3045 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[2], crn
);
3047 command_print(CMD
, "CRn %d out of range", crn
);
3048 return ERROR_COMMAND_ARGUMENT_INVALID
;
3051 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[3], crm
);
3053 command_print(CMD
, "CRm %d out of range", crm
);
3054 return ERROR_COMMAND_ARGUMENT_INVALID
;
3057 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[4], op2
);
3059 command_print(CMD
, "op2 %d out of range", op2
);
3060 return ERROR_COMMAND_ARGUMENT_INVALID
;
3064 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[5], value
);
3066 /* NOTE: parameters reordered! */
3067 /* ARMV4_5_MCR(cpnum, op1, 0, crn, crm, op2) */
3068 int retval
= arm
->mcr(target
, cpnum
, op1
, op2
, crn
, crm
, value
);
3069 if (retval
!= ERROR_OK
)
3073 /* NOTE: parameters reordered! */
3074 /* ARMV4_5_MRC(cpnum, op1, 0, crn, crm, op2) */
3075 int retval
= arm
->mrc(target
, cpnum
, op1
, op2
, crn
, crm
, &value
);
3076 if (retval
!= ERROR_OK
)
3079 command_print(CMD
, "0x%" PRIx32
, value
);
3085 static const struct command_registration aarch64_exec_command_handlers
[] = {
3087 .name
= "cache_info",
3088 .handler
= aarch64_handle_cache_info_command
,
3089 .mode
= COMMAND_EXEC
,
3090 .help
= "display information about target caches",
3095 .handler
= aarch64_handle_dbginit_command
,
3096 .mode
= COMMAND_EXEC
,
3097 .help
= "Initialize core debug",
3101 .name
= "disassemble",
3102 .handler
= aarch64_handle_disassemble_command
,
3103 .mode
= COMMAND_EXEC
,
3104 .help
= "Disassemble instructions",
3105 .usage
= "address [count]",
3109 .handler
= aarch64_mask_interrupts_command
,
3110 .mode
= COMMAND_ANY
,
3111 .help
= "mask aarch64 interrupts during single-step",
3112 .usage
= "['on'|'off']",
3116 .mode
= COMMAND_EXEC
,
3117 .handler
= aarch64_mcrmrc_command
,
3118 .help
= "write coprocessor register",
3119 .usage
= "cpnum op1 CRn CRm op2 value",
3123 .mode
= COMMAND_EXEC
,
3124 .handler
= aarch64_mcrmrc_command
,
3125 .help
= "read coprocessor register",
3126 .usage
= "cpnum op1 CRn CRm op2",
3129 .chain
= smp_command_handlers
,
3133 COMMAND_REGISTRATION_DONE
3136 static const struct command_registration aarch64_command_handlers
[] = {
3139 .mode
= COMMAND_ANY
,
3140 .help
= "ARM Command Group",
3142 .chain
= semihosting_common_handlers
3145 .chain
= armv8_command_handlers
,
3149 .mode
= COMMAND_ANY
,
3150 .help
= "Aarch64 command group",
3152 .chain
= aarch64_exec_command_handlers
,
3154 COMMAND_REGISTRATION_DONE
3157 struct target_type aarch64_target
= {
3160 .poll
= aarch64_poll
,
3161 .arch_state
= armv8_arch_state
,
3163 .halt
= aarch64_halt
,
3164 .resume
= aarch64_resume
,
3165 .step
= aarch64_step
,
3167 .assert_reset
= aarch64_assert_reset
,
3168 .deassert_reset
= aarch64_deassert_reset
,
3170 /* REVISIT allow exporting VFP3 registers ... */
3171 .get_gdb_arch
= armv8_get_gdb_arch
,
3172 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
3174 .read_memory
= aarch64_read_memory
,
3175 .write_memory
= aarch64_write_memory
,
3177 .add_breakpoint
= aarch64_add_breakpoint
,
3178 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
3179 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
3180 .remove_breakpoint
= aarch64_remove_breakpoint
,
3181 .add_watchpoint
= aarch64_add_watchpoint
,
3182 .remove_watchpoint
= aarch64_remove_watchpoint
,
3183 .hit_watchpoint
= aarch64_hit_watchpoint
,
3185 .commands
= aarch64_command_handlers
,
3186 .target_create
= aarch64_target_create
,
3187 .target_jim_configure
= aarch64_jim_configure
,
3188 .init_target
= aarch64_init_target
,
3189 .deinit_target
= aarch64_deinit_target
,
3190 .examine
= aarch64_examine
,
3192 .read_phys_memory
= aarch64_read_phys_memory
,
3193 .write_phys_memory
= aarch64_write_phys_memory
,
3195 .virt2phys
= aarch64_virt2phys
,
3198 struct target_type armv8r_target
= {
3201 .poll
= aarch64_poll
,
3202 .arch_state
= armv8_arch_state
,
3204 .halt
= aarch64_halt
,
3205 .resume
= aarch64_resume
,
3206 .step
= aarch64_step
,
3208 .assert_reset
= aarch64_assert_reset
,
3209 .deassert_reset
= aarch64_deassert_reset
,
3211 /* REVISIT allow exporting VFP3 registers ... */
3212 .get_gdb_arch
= armv8_get_gdb_arch
,
3213 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
3215 .read_memory
= aarch64_read_phys_memory
,
3216 .write_memory
= aarch64_write_phys_memory
,
3218 .add_breakpoint
= aarch64_add_breakpoint
,
3219 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
3220 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
3221 .remove_breakpoint
= aarch64_remove_breakpoint
,
3222 .add_watchpoint
= aarch64_add_watchpoint
,
3223 .remove_watchpoint
= aarch64_remove_watchpoint
,
3224 .hit_watchpoint
= aarch64_hit_watchpoint
,
3226 .commands
= aarch64_command_handlers
,
3227 .target_create
= armv8r_target_create
,
3228 .target_jim_configure
= aarch64_jim_configure
,
3229 .init_target
= aarch64_init_target
,
3230 .deinit_target
= aarch64_deinit_target
,
3231 .examine
= aarch64_examine
,
Linking to existing account procedure
If you already have an account and want to add another login method,
you MUST first sign in with your existing account,
then change the URL to
https://review.openocd.org/login/?link
to reach this page again; this time it will work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)