1 // SPDX-License-Identifier: GPL-2.0-or-later
3 /***************************************************************************
4 * Copyright (C) 2015 by David Ung *
6 ***************************************************************************/
12 #include "breakpoints.h"
14 #include "a64_disassembler.h"
16 #include "target_request.h"
17 #include "target_type.h"
18 #include "armv8_opcodes.h"
19 #include "armv8_cache.h"
20 #include "arm_coresight.h"
21 #include "arm_semihosting.h"
22 #include "jtag/interface.h"
24 #include <helper/time_support.h>
36 struct aarch64_private_config
{
37 struct adiv5_private_config adiv5_config
;
41 static int aarch64_poll(struct target
*target
);
42 static int aarch64_debug_entry(struct target
*target
);
43 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
44 static int aarch64_set_breakpoint(struct target
*target
,
45 struct breakpoint
*breakpoint
, uint8_t matchmode
);
46 static int aarch64_set_context_breakpoint(struct target
*target
,
47 struct breakpoint
*breakpoint
, uint8_t matchmode
);
48 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
49 struct breakpoint
*breakpoint
);
50 static int aarch64_unset_breakpoint(struct target
*target
,
51 struct breakpoint
*breakpoint
);
52 static int aarch64_mmu(struct target
*target
, int *enabled
);
53 static int aarch64_virt2phys(struct target
*target
,
54 target_addr_t virt
, target_addr_t
*phys
);
55 static int aarch64_read_cpu_memory(struct target
*target
,
56 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
58 static int aarch64_restore_system_control_reg(struct target
*target
)
60 enum arm_mode target_mode
= ARM_MODE_ANY
;
61 int retval
= ERROR_OK
;
64 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
65 struct armv8_common
*armv8
= target_to_armv8(target
);
67 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
68 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
69 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
71 switch (armv8
->arm
.core_mode
) {
73 target_mode
= ARMV8_64_EL1H
;
77 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
81 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
85 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
95 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
99 LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
100 armv8_mode_name(armv8
->arm
.core_mode
), armv8
->arm
.core_mode
);
104 if (target_mode
!= ARM_MODE_ANY
)
105 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
107 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
, aarch64
->system_control_reg
);
108 if (retval
!= ERROR_OK
)
111 if (target_mode
!= ARM_MODE_ANY
)
112 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
118 /* modify system_control_reg in order to enable or disable mmu for :
119 * - virt2phys address conversion
120 * - read or write memory in phys or virt address */
121 static int aarch64_mmu_modify(struct target
*target
, int enable
)
123 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
124 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
125 int retval
= ERROR_OK
;
126 enum arm_mode target_mode
= ARM_MODE_ANY
;
130 /* if mmu enabled at target stop and mmu not enable */
131 if (!(aarch64
->system_control_reg
& 0x1U
)) {
132 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
135 if (!(aarch64
->system_control_reg_curr
& 0x1U
))
136 aarch64
->system_control_reg_curr
|= 0x1U
;
138 if (aarch64
->system_control_reg_curr
& 0x4U
) {
139 /* data cache is active */
140 aarch64
->system_control_reg_curr
&= ~0x4U
;
141 /* flush data cache armv8 function to be called */
142 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
143 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
145 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
146 aarch64
->system_control_reg_curr
&= ~0x1U
;
150 switch (armv8
->arm
.core_mode
) {
152 target_mode
= ARMV8_64_EL1H
;
156 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
160 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
164 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
174 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
178 LOG_DEBUG("unknown cpu state 0x%x", armv8
->arm
.core_mode
);
181 if (target_mode
!= ARM_MODE_ANY
)
182 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
184 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
,
185 aarch64
->system_control_reg_curr
);
187 if (target_mode
!= ARM_MODE_ANY
)
188 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
194 * Basic debug access, very low level assumes state is saved
196 static int aarch64_init_debug_access(struct target
*target
)
198 struct armv8_common
*armv8
= target_to_armv8(target
);
202 LOG_DEBUG("%s", target_name(target
));
204 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
205 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
206 if (retval
!= ERROR_OK
) {
207 LOG_DEBUG("Examine %s failed", "oslock");
211 /* Clear Sticky Power Down status Bit in PRSR to enable access to
212 the registers in the Core Power Domain */
213 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
214 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
215 if (retval
!= ERROR_OK
)
219 * Static CTI configuration:
220 * Channel 0 -> trigger outputs HALT request to PE
221 * Channel 1 -> trigger outputs Resume request to PE
222 * Gate all channel trigger events from entering the CTM
226 retval
= arm_cti_enable(armv8
->cti
, true);
227 /* By default, gate all channel events to and from the CTM */
228 if (retval
== ERROR_OK
)
229 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
230 /* output halt requests to PE on channel 0 event */
231 if (retval
== ERROR_OK
)
232 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN0
, CTI_CHNL(0));
233 /* output restart requests to PE on channel 1 event */
234 if (retval
== ERROR_OK
)
235 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN1
, CTI_CHNL(1));
236 if (retval
!= ERROR_OK
)
239 /* Resync breakpoint registers */
244 /* Write to memory mapped registers directly with no cache or mmu handling */
245 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
246 target_addr_t address
,
250 struct armv8_common
*armv8
= target_to_armv8(target
);
252 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
257 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint64_t debug
)
259 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
262 dpm
->arm
= &a8
->armv8_common
.arm
;
265 retval
= armv8_dpm_setup(dpm
);
266 if (retval
== ERROR_OK
)
267 retval
= armv8_dpm_initialize(dpm
);
272 static int aarch64_set_dscr_bits(struct target
*target
, unsigned long bit_mask
, unsigned long value
)
274 struct armv8_common
*armv8
= target_to_armv8(target
);
275 return armv8_set_dbgreg_bits(armv8
, CPUV8_DBG_DSCR
, bit_mask
, value
);
278 static int aarch64_check_state_one(struct target
*target
,
279 uint32_t mask
, uint32_t val
, int *p_result
, uint32_t *p_prsr
)
281 struct armv8_common
*armv8
= target_to_armv8(target
);
285 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
286 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &prsr
);
287 if (retval
!= ERROR_OK
)
294 *p_result
= (prsr
& mask
) == (val
& mask
);
299 static int aarch64_wait_halt_one(struct target
*target
)
301 int retval
= ERROR_OK
;
304 int64_t then
= timeval_ms();
308 retval
= aarch64_check_state_one(target
, PRSR_HALT
, PRSR_HALT
, &halted
, &prsr
);
309 if (retval
!= ERROR_OK
|| halted
)
312 if (timeval_ms() > then
+ 1000) {
313 retval
= ERROR_TARGET_TIMEOUT
;
314 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32
, target_name(target
), prsr
);
321 static int aarch64_prepare_halt_smp(struct target
*target
, bool exc_target
, struct target
**p_first
)
323 int retval
= ERROR_OK
;
324 struct target_list
*head
;
325 struct target
*first
= NULL
;
327 LOG_DEBUG("target %s exc %i", target_name(target
), exc_target
);
329 foreach_smp_target(head
, target
->smp_targets
) {
330 struct target
*curr
= head
->target
;
331 struct armv8_common
*armv8
= target_to_armv8(curr
);
333 if (exc_target
&& curr
== target
)
335 if (!target_was_examined(curr
))
337 if (curr
->state
!= TARGET_RUNNING
)
340 /* HACK: mark this target as prepared for halting */
341 curr
->debug_reason
= DBG_REASON_DBGRQ
;
343 /* open the gate for channel 0 to let HALT requests pass to the CTM */
344 retval
= arm_cti_ungate_channel(armv8
->cti
, 0);
345 if (retval
== ERROR_OK
)
346 retval
= aarch64_set_dscr_bits(curr
, DSCR_HDE
, DSCR_HDE
);
347 if (retval
!= ERROR_OK
)
350 LOG_DEBUG("target %s prepared", target_name(curr
));
357 if (exc_target
&& first
)
366 static int aarch64_halt_one(struct target
*target
, enum halt_mode mode
)
368 int retval
= ERROR_OK
;
369 struct armv8_common
*armv8
= target_to_armv8(target
);
371 LOG_DEBUG("%s", target_name(target
));
373 /* allow Halting Debug Mode */
374 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
375 if (retval
!= ERROR_OK
)
378 /* trigger an event on channel 0, this outputs a halt request to the PE */
379 retval
= arm_cti_pulse_channel(armv8
->cti
, 0);
380 if (retval
!= ERROR_OK
)
383 if (mode
== HALT_SYNC
) {
384 retval
= aarch64_wait_halt_one(target
);
385 if (retval
!= ERROR_OK
) {
386 if (retval
== ERROR_TARGET_TIMEOUT
)
387 LOG_ERROR("Timeout waiting for target %s halt", target_name(target
));
395 static int aarch64_halt_smp(struct target
*target
, bool exc_target
)
397 struct target
*next
= target
;
400 /* prepare halt on all PEs of the group */
401 retval
= aarch64_prepare_halt_smp(target
, exc_target
, &next
);
403 if (exc_target
&& next
== target
)
406 /* halt the target PE */
407 if (retval
== ERROR_OK
)
408 retval
= aarch64_halt_one(next
, HALT_LAZY
);
410 if (retval
!= ERROR_OK
)
413 /* wait for all PEs to halt */
414 int64_t then
= timeval_ms();
416 bool all_halted
= true;
417 struct target_list
*head
;
420 foreach_smp_target(head
, target
->smp_targets
) {
425 if (!target_was_examined(curr
))
428 retval
= aarch64_check_state_one(curr
, PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
429 if (retval
!= ERROR_OK
|| !halted
) {
438 if (timeval_ms() > then
+ 1000) {
439 retval
= ERROR_TARGET_TIMEOUT
;
444 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
445 * and it looks like the CTI's are not connected by a common
446 * trigger matrix. It seems that we need to halt one core in each
447 * cluster explicitly. So if we find that a core has not halted
448 * yet, we trigger an explicit halt for the second cluster.
450 retval
= aarch64_halt_one(curr
, HALT_LAZY
);
451 if (retval
!= ERROR_OK
)
458 static int update_halt_gdb(struct target
*target
, enum target_debug_reason debug_reason
)
460 struct target
*gdb_target
= NULL
;
461 struct target_list
*head
;
464 if (debug_reason
== DBG_REASON_NOTHALTED
) {
465 LOG_DEBUG("Halting remaining targets in SMP group");
466 aarch64_halt_smp(target
, true);
469 /* poll all targets in the group, but skip the target that serves GDB */
470 foreach_smp_target(head
, target
->smp_targets
) {
472 /* skip calling context */
475 if (!target_was_examined(curr
))
477 /* skip targets that were already halted */
478 if (curr
->state
== TARGET_HALTED
)
480 /* remember the gdb_service->target */
481 if (curr
->gdb_service
)
482 gdb_target
= curr
->gdb_service
->target
;
484 if (curr
== gdb_target
)
487 /* avoid recursion in aarch64_poll() */
493 /* after all targets were updated, poll the gdb serving target */
494 if (gdb_target
&& gdb_target
!= target
)
495 aarch64_poll(gdb_target
);
/*
 * AArch64 Run control
 */
504 static int aarch64_poll(struct target
*target
)
506 enum target_state prev_target_state
;
507 int retval
= ERROR_OK
;
510 retval
= aarch64_check_state_one(target
,
511 PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
512 if (retval
!= ERROR_OK
)
516 prev_target_state
= target
->state
;
517 if (prev_target_state
!= TARGET_HALTED
) {
518 enum target_debug_reason debug_reason
= target
->debug_reason
;
520 /* We have a halting debug event */
521 target
->state
= TARGET_HALTED
;
522 LOG_DEBUG("Target %s halted", target_name(target
));
523 retval
= aarch64_debug_entry(target
);
524 if (retval
!= ERROR_OK
)
528 update_halt_gdb(target
, debug_reason
);
530 if (arm_semihosting(target
, &retval
) != 0)
533 switch (prev_target_state
) {
537 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
539 case TARGET_DEBUG_RUNNING
:
540 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_HALTED
);
547 target
->state
= TARGET_RUNNING
;
552 static int aarch64_halt(struct target
*target
)
554 struct armv8_common
*armv8
= target_to_armv8(target
);
555 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_HALT
;
558 return aarch64_halt_smp(target
, false);
560 return aarch64_halt_one(target
, HALT_SYNC
);
563 static int aarch64_restore_one(struct target
*target
, int current
,
564 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
566 struct armv8_common
*armv8
= target_to_armv8(target
);
567 struct arm
*arm
= &armv8
->arm
;
571 LOG_DEBUG("%s", target_name(target
));
573 if (!debug_execution
)
574 target_free_all_working_areas(target
);
576 /* current = 1: continue on current pc, otherwise continue at <address> */
577 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
579 resume_pc
= *address
;
581 *address
= resume_pc
;
583 /* Make sure that the Armv7 gdb thumb fixups does not
584 * kill the return address
586 switch (arm
->core_state
) {
588 resume_pc
&= 0xFFFFFFFC;
590 case ARM_STATE_AARCH64
:
591 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
593 case ARM_STATE_THUMB
:
594 case ARM_STATE_THUMB_EE
:
595 /* When the return address is loaded into PC
596 * bit 0 must be 1 to stay in Thumb state
600 case ARM_STATE_JAZELLE
:
601 LOG_ERROR("How do I resume into Jazelle state??");
604 LOG_DEBUG("resume pc = 0x%016" PRIx64
, resume_pc
);
605 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
606 arm
->pc
->dirty
= true;
607 arm
->pc
->valid
= true;
609 /* called it now before restoring context because it uses cpu
610 * register r0 for restoring system control register */
611 retval
= aarch64_restore_system_control_reg(target
);
612 if (retval
== ERROR_OK
)
613 retval
= aarch64_restore_context(target
, handle_breakpoints
);
619 * prepare single target for restart
623 static int aarch64_prepare_restart_one(struct target
*target
)
625 struct armv8_common
*armv8
= target_to_armv8(target
);
630 LOG_DEBUG("%s", target_name(target
));
632 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
633 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
634 if (retval
!= ERROR_OK
)
637 if ((dscr
& DSCR_ITE
) == 0)
638 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
639 if ((dscr
& DSCR_ERR
) != 0)
640 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
642 /* acknowledge a pending CTI halt event */
643 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
645 * open the CTI gate for channel 1 so that the restart events
646 * get passed along to all PEs. Also close gate for channel 0
647 * to isolate the PE from halt events.
649 if (retval
== ERROR_OK
)
650 retval
= arm_cti_ungate_channel(armv8
->cti
, 1);
651 if (retval
== ERROR_OK
)
652 retval
= arm_cti_gate_channel(armv8
->cti
, 0);
654 /* make sure that DSCR.HDE is set */
655 if (retval
== ERROR_OK
) {
657 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
658 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
661 if (retval
== ERROR_OK
) {
662 /* clear sticky bits in PRSR, SDR is now 0 */
663 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
664 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &tmp
);
670 static int aarch64_do_restart_one(struct target
*target
, enum restart_mode mode
)
672 struct armv8_common
*armv8
= target_to_armv8(target
);
675 LOG_DEBUG("%s", target_name(target
));
677 /* trigger an event on channel 1, generates a restart request to the PE */
678 retval
= arm_cti_pulse_channel(armv8
->cti
, 1);
679 if (retval
!= ERROR_OK
)
682 if (mode
== RESTART_SYNC
) {
683 int64_t then
= timeval_ms();
687 * if PRSR.SDR is set now, the target did restart, even
688 * if it's now already halted again (e.g. due to breakpoint)
690 retval
= aarch64_check_state_one(target
,
691 PRSR_SDR
, PRSR_SDR
, &resumed
, NULL
);
692 if (retval
!= ERROR_OK
|| resumed
)
695 if (timeval_ms() > then
+ 1000) {
696 LOG_ERROR("%s: Timeout waiting for resume"PRIx32
, target_name(target
));
697 retval
= ERROR_TARGET_TIMEOUT
;
703 if (retval
!= ERROR_OK
)
706 target
->debug_reason
= DBG_REASON_NOTHALTED
;
707 target
->state
= TARGET_RUNNING
;
712 static int aarch64_restart_one(struct target
*target
, enum restart_mode mode
)
716 LOG_DEBUG("%s", target_name(target
));
718 retval
= aarch64_prepare_restart_one(target
);
719 if (retval
== ERROR_OK
)
720 retval
= aarch64_do_restart_one(target
, mode
);
726 * prepare all but the current target for restart
728 static int aarch64_prep_restart_smp(struct target
*target
, int handle_breakpoints
, struct target
**p_first
)
730 int retval
= ERROR_OK
;
731 struct target_list
*head
;
732 struct target
*first
= NULL
;
735 foreach_smp_target(head
, target
->smp_targets
) {
736 struct target
*curr
= head
->target
;
738 /* skip calling target */
741 if (!target_was_examined(curr
))
743 if (curr
->state
!= TARGET_HALTED
)
746 /* resume at current address, not in step mode */
747 retval
= aarch64_restore_one(curr
, 1, &address
, handle_breakpoints
, 0);
748 if (retval
== ERROR_OK
)
749 retval
= aarch64_prepare_restart_one(curr
);
750 if (retval
!= ERROR_OK
) {
751 LOG_ERROR("failed to restore target %s", target_name(curr
));
754 /* remember the first valid target in the group */
766 static int aarch64_step_restart_smp(struct target
*target
)
768 int retval
= ERROR_OK
;
769 struct target_list
*head
;
770 struct target
*first
= NULL
;
772 LOG_DEBUG("%s", target_name(target
));
774 retval
= aarch64_prep_restart_smp(target
, 0, &first
);
775 if (retval
!= ERROR_OK
)
779 retval
= aarch64_do_restart_one(first
, RESTART_LAZY
);
780 if (retval
!= ERROR_OK
) {
781 LOG_DEBUG("error restarting target %s", target_name(first
));
785 int64_t then
= timeval_ms();
787 struct target
*curr
= target
;
788 bool all_resumed
= true;
790 foreach_smp_target(head
, target
->smp_targets
) {
799 if (!target_was_examined(curr
))
802 retval
= aarch64_check_state_one(curr
,
803 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
804 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
809 if (curr
->state
!= TARGET_RUNNING
) {
810 curr
->state
= TARGET_RUNNING
;
811 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
812 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
819 if (timeval_ms() > then
+ 1000) {
820 LOG_ERROR("%s: timeout waiting for target resume", __func__
);
821 retval
= ERROR_TARGET_TIMEOUT
;
825 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
826 * and it looks like the CTI's are not connected by a common
827 * trigger matrix. It seems that we need to halt one core in each
828 * cluster explicitly. So if we find that a core has not halted
829 * yet, we trigger an explicit resume for the second cluster.
831 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
832 if (retval
!= ERROR_OK
)
839 static int aarch64_resume(struct target
*target
, int current
,
840 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
843 uint64_t addr
= address
;
845 struct armv8_common
*armv8
= target_to_armv8(target
);
846 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_RESUME
;
848 if (target
->state
!= TARGET_HALTED
)
849 return ERROR_TARGET_NOT_HALTED
;
852 * If this target is part of a SMP group, prepare the others
853 * targets for resuming. This involves restoring the complete
854 * target register context and setting up CTI gates to accept
855 * resume events from the trigger matrix.
858 retval
= aarch64_prep_restart_smp(target
, handle_breakpoints
, NULL
);
859 if (retval
!= ERROR_OK
)
863 /* all targets prepared, restore and restart the current target */
864 retval
= aarch64_restore_one(target
, current
, &addr
, handle_breakpoints
,
866 if (retval
== ERROR_OK
)
867 retval
= aarch64_restart_one(target
, RESTART_SYNC
);
868 if (retval
!= ERROR_OK
)
872 int64_t then
= timeval_ms();
874 struct target
*curr
= target
;
875 struct target_list
*head
;
876 bool all_resumed
= true;
878 foreach_smp_target(head
, target
->smp_targets
) {
885 if (!target_was_examined(curr
))
888 retval
= aarch64_check_state_one(curr
,
889 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
890 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
895 if (curr
->state
!= TARGET_RUNNING
) {
896 curr
->state
= TARGET_RUNNING
;
897 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
898 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
905 if (timeval_ms() > then
+ 1000) {
906 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__
, target_name(curr
));
907 retval
= ERROR_TARGET_TIMEOUT
;
912 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
913 * and it looks like the CTI's are not connected by a common
914 * trigger matrix. It seems that we need to halt one core in each
915 * cluster explicitly. So if we find that a core has not halted
916 * yet, we trigger an explicit resume for the second cluster.
918 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
919 if (retval
!= ERROR_OK
)
924 if (retval
!= ERROR_OK
)
927 target
->debug_reason
= DBG_REASON_NOTHALTED
;
929 if (!debug_execution
) {
930 target
->state
= TARGET_RUNNING
;
931 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
932 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
934 target
->state
= TARGET_DEBUG_RUNNING
;
935 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
936 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
942 static int aarch64_debug_entry(struct target
*target
)
944 int retval
= ERROR_OK
;
945 struct armv8_common
*armv8
= target_to_armv8(target
);
946 struct arm_dpm
*dpm
= &armv8
->dpm
;
947 enum arm_state core_state
;
950 /* make sure to clear all sticky errors */
951 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
952 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
953 if (retval
== ERROR_OK
)
954 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
955 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
956 if (retval
== ERROR_OK
)
957 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
959 if (retval
!= ERROR_OK
)
962 LOG_DEBUG("%s dscr = 0x%08" PRIx32
, target_name(target
), dscr
);
965 core_state
= armv8_dpm_get_core_state(dpm
);
966 armv8_select_opcodes(armv8
, core_state
== ARM_STATE_AARCH64
);
967 armv8_select_reg_access(armv8
, core_state
== ARM_STATE_AARCH64
);
969 /* close the CTI gate for all events */
970 if (retval
== ERROR_OK
)
971 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
972 /* discard async exceptions */
973 if (retval
== ERROR_OK
)
974 retval
= dpm
->instr_cpsr_sync(dpm
);
975 if (retval
!= ERROR_OK
)
978 /* Examine debug reason */
979 armv8_dpm_report_dscr(dpm
, dscr
);
981 /* save the memory address that triggered the watchpoint */
982 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
985 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
986 armv8
->debug_base
+ CPUV8_DBG_EDWAR0
, &tmp
);
987 if (retval
!= ERROR_OK
)
989 target_addr_t edwar
= tmp
;
991 /* EDWAR[63:32] has unknown content in aarch32 state */
992 if (core_state
== ARM_STATE_AARCH64
) {
993 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
994 armv8
->debug_base
+ CPUV8_DBG_EDWAR1
, &tmp
);
995 if (retval
!= ERROR_OK
)
997 edwar
|= ((target_addr_t
)tmp
) << 32;
1000 armv8
->dpm
.wp_addr
= edwar
;
1003 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
1005 if (retval
== ERROR_OK
&& armv8
->post_debug_entry
)
1006 retval
= armv8
->post_debug_entry(target
);
1011 static int aarch64_post_debug_entry(struct target
*target
)
1013 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1014 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1016 enum arm_mode target_mode
= ARM_MODE_ANY
;
1019 switch (armv8
->arm
.core_mode
) {
1021 target_mode
= ARMV8_64_EL1H
;
1025 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL1
, 0);
1029 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL2
, 0);
1033 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL3
, 0);
1043 instr
= ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1047 LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
1048 armv8_mode_name(armv8
->arm
.core_mode
), armv8
->arm
.core_mode
);
1052 if (target_mode
!= ARM_MODE_ANY
)
1053 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
1055 retval
= armv8
->dpm
.instr_read_data_r0(&armv8
->dpm
, instr
, &aarch64
->system_control_reg
);
1056 if (retval
!= ERROR_OK
)
1059 if (target_mode
!= ARM_MODE_ANY
)
1060 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
1062 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
1063 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
1065 if (armv8
->armv8_mmu
.armv8_cache
.info
== -1) {
1066 armv8_identify_cache(armv8
);
1067 armv8_read_mpidr(armv8
);
1070 armv8
->armv8_mmu
.mmu_enabled
=
1071 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
1072 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
1073 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
1074 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
1075 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
1080 * single-step a target
1082 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
1083 int handle_breakpoints
)
1085 struct armv8_common
*armv8
= target_to_armv8(target
);
1086 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1087 int saved_retval
= ERROR_OK
;
1091 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_STEP
;
1093 if (target
->state
!= TARGET_HALTED
) {
1094 LOG_WARNING("target not halted");
1095 return ERROR_TARGET_NOT_HALTED
;
1098 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1099 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
1100 /* make sure EDECR.SS is not set when restoring the register */
1102 if (retval
== ERROR_OK
) {
1104 /* set EDECR.SS to enter hardware step mode */
1105 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1106 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (edecr
|0x4));
1108 /* disable interrupts while stepping */
1109 if (retval
== ERROR_OK
&& aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
)
1110 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0x3 << 22);
1111 /* bail out if stepping setup has failed */
1112 if (retval
!= ERROR_OK
)
1115 if (target
->smp
&& (current
== 1)) {
1117 * isolate current target so that it doesn't get resumed
1118 * together with the others
1120 retval
= arm_cti_gate_channel(armv8
->cti
, 1);
1121 /* resume all other targets in the group */
1122 if (retval
== ERROR_OK
)
1123 retval
= aarch64_step_restart_smp(target
);
1124 if (retval
!= ERROR_OK
) {
1125 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1128 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1131 /* all other targets running, restore and restart the current target */
1132 retval
= aarch64_restore_one(target
, current
, &address
, 0, 0);
1133 if (retval
== ERROR_OK
)
1134 retval
= aarch64_restart_one(target
, RESTART_LAZY
);
1136 if (retval
!= ERROR_OK
)
1139 LOG_DEBUG("target step-resumed at 0x%" PRIx64
, address
);
1140 if (!handle_breakpoints
)
1141 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1143 int64_t then
= timeval_ms();
1148 retval
= aarch64_check_state_one(target
,
1149 PRSR_SDR
|PRSR_HALT
, PRSR_SDR
|PRSR_HALT
, &stepped
, &prsr
);
1150 if (retval
!= ERROR_OK
|| stepped
)
1153 if (timeval_ms() > then
+ 100) {
1154 LOG_ERROR("timeout waiting for target %s halt after step",
1155 target_name(target
));
1156 retval
= ERROR_TARGET_TIMEOUT
;
1162 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1163 * causes a timeout. The core takes the step but doesn't complete it and so
1164 * debug state is never entered. However, you can manually halt the core
1165 * as an external debug even is also a WFI wakeup event.
1167 if (retval
== ERROR_TARGET_TIMEOUT
)
1168 saved_retval
= aarch64_halt_one(target
, HALT_SYNC
);
1171 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1172 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
1173 if (retval
!= ERROR_OK
)
1176 /* restore interrupts */
1177 if (aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
) {
1178 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0);
1179 if (retval
!= ERROR_OK
)
1183 if (saved_retval
!= ERROR_OK
)
1184 return saved_retval
;
1189 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
1191 struct armv8_common
*armv8
= target_to_armv8(target
);
1192 struct arm
*arm
= &armv8
->arm
;
1196 LOG_DEBUG("%s", target_name(target
));
1198 if (armv8
->pre_restore_context
)
1199 armv8
->pre_restore_context(target
);
1201 retval
= armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
1202 if (retval
== ERROR_OK
) {
1203 /* registers are now invalid */
1204 register_cache_invalidate(arm
->core_cache
);
1205 register_cache_invalidate(arm
->core_cache
->next
);
1212 * Cortex-A8 Breakpoint and watchpoint functions
1215 /* Setup hardware Breakpoint Register Pair */
1216 static int aarch64_set_breakpoint(struct target
*target
,
1217 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1222 uint8_t byte_addr_select
= 0x0F;
1223 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1224 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1225 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1227 if (breakpoint
->is_set
) {
1228 LOG_WARNING("breakpoint already set");
1232 if (breakpoint
->type
== BKPT_HARD
) {
1234 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
1236 if (brp_i
>= aarch64
->brp_num
) {
1237 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1238 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1240 breakpoint_hw_set(breakpoint
, brp_i
);
1241 if (breakpoint
->length
== 2)
1242 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1243 control
= ((matchmode
& 0x7) << 20)
1245 | (byte_addr_select
<< 5)
1247 brp_list
[brp_i
].used
= 1;
1248 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1249 brp_list
[brp_i
].control
= control
;
1250 bpt_value
= brp_list
[brp_i
].value
;
1252 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1253 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].brpn
,
1254 (uint32_t)(bpt_value
& 0xFFFFFFFF));
1255 if (retval
!= ERROR_OK
)
1257 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1258 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].brpn
,
1259 (uint32_t)(bpt_value
>> 32));
1260 if (retval
!= ERROR_OK
)
1263 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1264 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].brpn
,
1265 brp_list
[brp_i
].control
);
1266 if (retval
!= ERROR_OK
)
1268 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1269 brp_list
[brp_i
].control
,
1270 brp_list
[brp_i
].value
);
1272 } else if (breakpoint
->type
== BKPT_SOFT
) {
1276 if (armv8_dpm_get_core_state(&armv8
->dpm
) == ARM_STATE_AARCH64
) {
1277 opcode
= ARMV8_HLT(11);
1279 if (breakpoint
->length
!= 4)
1280 LOG_ERROR("bug: breakpoint length should be 4 in AArch64 mode");
1283 * core_state is ARM_STATE_ARM
1284 * in that case the opcode depends on breakpoint length:
1285 * - if length == 4 => A32 opcode
1286 * - if length == 2 => T32 opcode
1287 * - if length == 3 => T32 opcode (refer to gdb doc : ARM-Breakpoint-Kinds)
1288 * in that case the length should be changed from 3 to 4 bytes
1290 opcode
= (breakpoint
->length
== 4) ? ARMV8_HLT_A1(11) :
1291 (uint32_t) (ARMV8_HLT_T1(11) | ARMV8_HLT_T1(11) << 16);
1293 if (breakpoint
->length
== 3)
1294 breakpoint
->length
= 4;
1297 buf_set_u32(code
, 0, 32, opcode
);
1299 retval
= target_read_memory(target
,
1300 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1301 breakpoint
->length
, 1,
1302 breakpoint
->orig_instr
);
1303 if (retval
!= ERROR_OK
)
1306 armv8_cache_d_inner_flush_virt(armv8
,
1307 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1308 breakpoint
->length
);
1310 retval
= target_write_memory(target
,
1311 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1312 breakpoint
->length
, 1, code
);
1313 if (retval
!= ERROR_OK
)
1316 armv8_cache_d_inner_flush_virt(armv8
,
1317 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1318 breakpoint
->length
);
1320 armv8_cache_i_inner_inval_virt(armv8
,
1321 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1322 breakpoint
->length
);
1324 breakpoint
->is_set
= true;
1327 /* Ensure that halting debug mode is enable */
1328 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
1329 if (retval
!= ERROR_OK
) {
1330 LOG_DEBUG("Failed to set DSCR.HDE");
1337 static int aarch64_set_context_breakpoint(struct target
*target
,
1338 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1340 int retval
= ERROR_FAIL
;
1343 uint8_t byte_addr_select
= 0x0F;
1344 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1345 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1346 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1348 if (breakpoint
->is_set
) {
1349 LOG_WARNING("breakpoint already set");
1352 /*check available context BRPs*/
1353 while ((brp_list
[brp_i
].used
||
1354 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
1357 if (brp_i
>= aarch64
->brp_num
) {
1358 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1362 breakpoint_hw_set(breakpoint
, brp_i
);
1363 control
= ((matchmode
& 0x7) << 20)
1365 | (byte_addr_select
<< 5)
1367 brp_list
[brp_i
].used
= 1;
1368 brp_list
[brp_i
].value
= (breakpoint
->asid
);
1369 brp_list
[brp_i
].control
= control
;
1370 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1371 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].brpn
,
1372 brp_list
[brp_i
].value
);
1373 if (retval
!= ERROR_OK
)
1375 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1376 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].brpn
,
1377 brp_list
[brp_i
].control
);
1378 if (retval
!= ERROR_OK
)
1380 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1381 brp_list
[brp_i
].control
,
1382 brp_list
[brp_i
].value
);
1387 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1389 int retval
= ERROR_FAIL
;
1390 int brp_1
= 0; /* holds the contextID pair */
1391 int brp_2
= 0; /* holds the IVA pair */
1392 uint32_t control_ctx
, control_iva
;
1393 uint8_t ctx_byte_addr_select
= 0x0F;
1394 uint8_t iva_byte_addr_select
= 0x0F;
1395 uint8_t ctx_machmode
= 0x03;
1396 uint8_t iva_machmode
= 0x01;
1397 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1398 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1399 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1401 if (breakpoint
->is_set
) {
1402 LOG_WARNING("breakpoint already set");
1405 /*check available context BRPs*/
1406 while ((brp_list
[brp_1
].used
||
1407 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
1410 LOG_DEBUG("brp(CTX) found num: %d", brp_1
);
1411 if (brp_1
>= aarch64
->brp_num
) {
1412 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1416 while ((brp_list
[brp_2
].used
||
1417 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
1420 LOG_DEBUG("brp(IVA) found num: %d", brp_2
);
1421 if (brp_2
>= aarch64
->brp_num
) {
1422 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1426 breakpoint_hw_set(breakpoint
, brp_1
);
1427 breakpoint
->linked_brp
= brp_2
;
1428 control_ctx
= ((ctx_machmode
& 0x7) << 20)
1431 | (ctx_byte_addr_select
<< 5)
1433 brp_list
[brp_1
].used
= 1;
1434 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1435 brp_list
[brp_1
].control
= control_ctx
;
1436 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1437 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].brpn
,
1438 brp_list
[brp_1
].value
);
1439 if (retval
!= ERROR_OK
)
1441 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1442 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].brpn
,
1443 brp_list
[brp_1
].control
);
1444 if (retval
!= ERROR_OK
)
1447 control_iva
= ((iva_machmode
& 0x7) << 20)
1450 | (iva_byte_addr_select
<< 5)
1452 brp_list
[brp_2
].used
= 1;
1453 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1454 brp_list
[brp_2
].control
= control_iva
;
1455 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1456 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].brpn
,
1457 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1458 if (retval
!= ERROR_OK
)
1460 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1461 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].brpn
,
1462 brp_list
[brp_2
].value
>> 32);
1463 if (retval
!= ERROR_OK
)
1465 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1466 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].brpn
,
1467 brp_list
[brp_2
].control
);
1468 if (retval
!= ERROR_OK
)
1474 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1477 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1478 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1479 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1481 if (!breakpoint
->is_set
) {
1482 LOG_WARNING("breakpoint not set");
1486 if (breakpoint
->type
== BKPT_HARD
) {
1487 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1488 int brp_i
= breakpoint
->number
;
1489 int brp_j
= breakpoint
->linked_brp
;
1490 if (brp_i
>= aarch64
->brp_num
) {
1491 LOG_DEBUG("Invalid BRP number in breakpoint");
1494 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1495 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1496 brp_list
[brp_i
].used
= 0;
1497 brp_list
[brp_i
].value
= 0;
1498 brp_list
[brp_i
].control
= 0;
1499 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1500 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].brpn
,
1501 brp_list
[brp_i
].control
);
1502 if (retval
!= ERROR_OK
)
1504 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1505 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].brpn
,
1506 (uint32_t)brp_list
[brp_i
].value
);
1507 if (retval
!= ERROR_OK
)
1509 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1510 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].brpn
,
1511 (uint32_t)brp_list
[brp_i
].value
);
1512 if (retval
!= ERROR_OK
)
1514 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1515 LOG_DEBUG("Invalid BRP number in breakpoint");
1518 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1519 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1520 brp_list
[brp_j
].used
= 0;
1521 brp_list
[brp_j
].value
= 0;
1522 brp_list
[brp_j
].control
= 0;
1523 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1524 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_j
].brpn
,
1525 brp_list
[brp_j
].control
);
1526 if (retval
!= ERROR_OK
)
1528 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1529 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_j
].brpn
,
1530 (uint32_t)brp_list
[brp_j
].value
);
1531 if (retval
!= ERROR_OK
)
1533 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1534 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_j
].brpn
,
1535 (uint32_t)brp_list
[brp_j
].value
);
1536 if (retval
!= ERROR_OK
)
1539 breakpoint
->linked_brp
= 0;
1540 breakpoint
->is_set
= false;
1544 int brp_i
= breakpoint
->number
;
1545 if (brp_i
>= aarch64
->brp_num
) {
1546 LOG_DEBUG("Invalid BRP number in breakpoint");
1549 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1550 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1551 brp_list
[brp_i
].used
= 0;
1552 brp_list
[brp_i
].value
= 0;
1553 brp_list
[brp_i
].control
= 0;
1554 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1555 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].brpn
,
1556 brp_list
[brp_i
].control
);
1557 if (retval
!= ERROR_OK
)
1559 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1560 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].brpn
,
1561 brp_list
[brp_i
].value
);
1562 if (retval
!= ERROR_OK
)
1565 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1566 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].brpn
,
1567 (uint32_t)brp_list
[brp_i
].value
);
1568 if (retval
!= ERROR_OK
)
1570 breakpoint
->is_set
= false;
1574 /* restore original instruction (kept in target endianness) */
1576 armv8_cache_d_inner_flush_virt(armv8
,
1577 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1578 breakpoint
->length
);
1580 if (breakpoint
->length
== 4) {
1581 retval
= target_write_memory(target
,
1582 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1583 4, 1, breakpoint
->orig_instr
);
1584 if (retval
!= ERROR_OK
)
1587 retval
= target_write_memory(target
,
1588 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1589 2, 1, breakpoint
->orig_instr
);
1590 if (retval
!= ERROR_OK
)
1594 armv8_cache_d_inner_flush_virt(armv8
,
1595 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1596 breakpoint
->length
);
1598 armv8_cache_i_inner_inval_virt(armv8
,
1599 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1600 breakpoint
->length
);
1602 breakpoint
->is_set
= false;
1607 static int aarch64_add_breakpoint(struct target
*target
,
1608 struct breakpoint
*breakpoint
)
1610 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1612 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1613 LOG_INFO("no hardware breakpoint available");
1614 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1617 if (breakpoint
->type
== BKPT_HARD
)
1618 aarch64
->brp_num_available
--;
1620 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1623 static int aarch64_add_context_breakpoint(struct target
*target
,
1624 struct breakpoint
*breakpoint
)
1626 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1628 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1629 LOG_INFO("no hardware breakpoint available");
1630 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1633 if (breakpoint
->type
== BKPT_HARD
)
1634 aarch64
->brp_num_available
--;
1636 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1639 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1640 struct breakpoint
*breakpoint
)
1642 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1644 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1645 LOG_INFO("no hardware breakpoint available");
1646 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1649 if (breakpoint
->type
== BKPT_HARD
)
1650 aarch64
->brp_num_available
--;
1652 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1655 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1657 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1660 /* It is perfectly possible to remove breakpoints while the target is running */
1661 if (target
->state
!= TARGET_HALTED
) {
1662 LOG_WARNING("target not halted");
1663 return ERROR_TARGET_NOT_HALTED
;
1667 if (breakpoint
->is_set
) {
1668 aarch64_unset_breakpoint(target
, breakpoint
);
1669 if (breakpoint
->type
== BKPT_HARD
)
1670 aarch64
->brp_num_available
++;
1676 /* Setup hardware Watchpoint Register Pair */
1677 static int aarch64_set_watchpoint(struct target
*target
,
1678 struct watchpoint
*watchpoint
)
1682 uint32_t control
, offset
, length
;
1683 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1684 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1685 struct aarch64_brp
*wp_list
= aarch64
->wp_list
;
1687 if (watchpoint
->is_set
) {
1688 LOG_WARNING("watchpoint already set");
1692 while (wp_list
[wp_i
].used
&& (wp_i
< aarch64
->wp_num
))
1694 if (wp_i
>= aarch64
->wp_num
) {
1695 LOG_ERROR("ERROR Can not find free Watchpoint Register Pair");
1696 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1699 control
= (1 << 0) /* enable */
1700 | (3 << 1) /* both user and privileged access */
1701 | (1 << 13); /* higher mode control */
1703 switch (watchpoint
->rw
) {
1715 /* Match up to 8 bytes. */
1716 offset
= watchpoint
->address
& 7;
1717 length
= watchpoint
->length
;
1718 if (offset
+ length
> sizeof(uint64_t)) {
1719 length
= sizeof(uint64_t) - offset
;
1720 LOG_WARNING("Adjust watchpoint match inside 8-byte boundary");
1722 for (; length
> 0; offset
++, length
--)
1723 control
|= (1 << offset
) << 5;
1725 wp_list
[wp_i
].value
= watchpoint
->address
& 0xFFFFFFFFFFFFFFF8ULL
;
1726 wp_list
[wp_i
].control
= control
;
1728 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1729 + CPUV8_DBG_WVR_BASE
+ 16 * wp_list
[wp_i
].brpn
,
1730 (uint32_t)(wp_list
[wp_i
].value
& 0xFFFFFFFF));
1731 if (retval
!= ERROR_OK
)
1733 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1734 + CPUV8_DBG_WVR_BASE
+ 4 + 16 * wp_list
[wp_i
].brpn
,
1735 (uint32_t)(wp_list
[wp_i
].value
>> 32));
1736 if (retval
!= ERROR_OK
)
1739 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1740 + CPUV8_DBG_WCR_BASE
+ 16 * wp_list
[wp_i
].brpn
,
1742 if (retval
!= ERROR_OK
)
1744 LOG_DEBUG("wp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, wp_i
,
1745 wp_list
[wp_i
].control
, wp_list
[wp_i
].value
);
1747 /* Ensure that halting debug mode is enable */
1748 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
1749 if (retval
!= ERROR_OK
) {
1750 LOG_DEBUG("Failed to set DSCR.HDE");
1754 wp_list
[wp_i
].used
= 1;
1755 watchpoint_set(watchpoint
, wp_i
);
1760 /* Clear hardware Watchpoint Register Pair */
1761 static int aarch64_unset_watchpoint(struct target
*target
,
1762 struct watchpoint
*watchpoint
)
1765 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1766 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1767 struct aarch64_brp
*wp_list
= aarch64
->wp_list
;
1769 if (!watchpoint
->is_set
) {
1770 LOG_WARNING("watchpoint not set");
1774 int wp_i
= watchpoint
->number
;
1775 if (wp_i
>= aarch64
->wp_num
) {
1776 LOG_DEBUG("Invalid WP number in watchpoint");
1779 LOG_DEBUG("rwp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, wp_i
,
1780 wp_list
[wp_i
].control
, wp_list
[wp_i
].value
);
1781 wp_list
[wp_i
].used
= 0;
1782 wp_list
[wp_i
].value
= 0;
1783 wp_list
[wp_i
].control
= 0;
1784 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1785 + CPUV8_DBG_WCR_BASE
+ 16 * wp_list
[wp_i
].brpn
,
1786 wp_list
[wp_i
].control
);
1787 if (retval
!= ERROR_OK
)
1789 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1790 + CPUV8_DBG_WVR_BASE
+ 16 * wp_list
[wp_i
].brpn
,
1791 wp_list
[wp_i
].value
);
1792 if (retval
!= ERROR_OK
)
1795 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1796 + CPUV8_DBG_WVR_BASE
+ 4 + 16 * wp_list
[wp_i
].brpn
,
1797 (uint32_t)wp_list
[wp_i
].value
);
1798 if (retval
!= ERROR_OK
)
1800 watchpoint
->is_set
= false;
1805 static int aarch64_add_watchpoint(struct target
*target
,
1806 struct watchpoint
*watchpoint
)
1809 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1811 if (aarch64
->wp_num_available
< 1) {
1812 LOG_INFO("no hardware watchpoint available");
1813 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1816 retval
= aarch64_set_watchpoint(target
, watchpoint
);
1817 if (retval
== ERROR_OK
)
1818 aarch64
->wp_num_available
--;
1823 static int aarch64_remove_watchpoint(struct target
*target
,
1824 struct watchpoint
*watchpoint
)
1826 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1828 if (watchpoint
->is_set
) {
1829 aarch64_unset_watchpoint(target
, watchpoint
);
1830 aarch64
->wp_num_available
++;
1837 * find out which watchpoint hits
1838 * get exception address and compare the address to watchpoints
1840 static int aarch64_hit_watchpoint(struct target
*target
,
1841 struct watchpoint
**hit_watchpoint
)
1843 if (target
->debug_reason
!= DBG_REASON_WATCHPOINT
)
1846 struct armv8_common
*armv8
= target_to_armv8(target
);
1848 target_addr_t exception_address
;
1849 struct watchpoint
*wp
;
1851 exception_address
= armv8
->dpm
.wp_addr
;
1853 if (exception_address
== 0xFFFFFFFF)
1856 for (wp
= target
->watchpoints
; wp
; wp
= wp
->next
)
1857 if (exception_address
>= wp
->address
&& exception_address
< (wp
->address
+ wp
->length
)) {
1858 *hit_watchpoint
= wp
;
1866 * Cortex-A8 Reset functions
1869 static int aarch64_enable_reset_catch(struct target
*target
, bool enable
)
1871 struct armv8_common
*armv8
= target_to_armv8(target
);
1875 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1876 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
1877 LOG_DEBUG("EDECR = 0x%08" PRIx32
", enable=%d", edecr
, enable
);
1878 if (retval
!= ERROR_OK
)
1886 return mem_ap_write_atomic_u32(armv8
->debug_ap
,
1887 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
1890 static int aarch64_clear_reset_catch(struct target
*target
)
1892 struct armv8_common
*armv8
= target_to_armv8(target
);
1897 /* check if Reset Catch debug event triggered as expected */
1898 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1899 armv8
->debug_base
+ CPUV8_DBG_EDESR
, &edesr
);
1900 if (retval
!= ERROR_OK
)
1903 was_triggered
= !!(edesr
& ESR_RC
);
1904 LOG_DEBUG("Reset Catch debug event %s",
1905 was_triggered
? "triggered" : "NOT triggered!");
1907 if (was_triggered
) {
1908 /* clear pending Reset Catch debug event */
1910 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1911 armv8
->debug_base
+ CPUV8_DBG_EDESR
, edesr
);
1912 if (retval
!= ERROR_OK
)
1919 static int aarch64_assert_reset(struct target
*target
)
1921 struct armv8_common
*armv8
= target_to_armv8(target
);
1922 enum reset_types reset_config
= jtag_get_reset_config();
1927 /* Issue some kind of warm reset. */
1928 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1929 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1930 else if (reset_config
& RESET_HAS_SRST
) {
1931 bool srst_asserted
= false;
1933 if (target
->reset_halt
&& !(reset_config
& RESET_SRST_PULLS_TRST
)) {
1934 if (target_was_examined(target
)) {
1936 if (reset_config
& RESET_SRST_NO_GATING
) {
1938 * SRST needs to be asserted *before* Reset Catch
1939 * debug event can be set up.
1941 adapter_assert_reset();
1942 srst_asserted
= true;
1945 /* make sure to clear all sticky errors */
1946 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1947 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
1949 /* set up Reset Catch debug event to halt the CPU after reset */
1950 retval
= aarch64_enable_reset_catch(target
, true);
1951 if (retval
!= ERROR_OK
)
1952 LOG_WARNING("%s: Error enabling Reset Catch debug event; the CPU will not halt immediately after reset!",
1953 target_name(target
));
1955 LOG_WARNING("%s: Target not examined, will not halt immediately after reset!",
1956 target_name(target
));
1960 /* REVISIT handle "pulls" cases, if there's
1961 * hardware that needs them to work.
1964 adapter_assert_reset();
1966 LOG_ERROR("%s: how to reset?", target_name(target
));
1970 /* registers are now invalid */
1971 if (target_was_examined(target
)) {
1972 register_cache_invalidate(armv8
->arm
.core_cache
);
1973 register_cache_invalidate(armv8
->arm
.core_cache
->next
);
1976 target
->state
= TARGET_RESET
;
1981 static int aarch64_deassert_reset(struct target
*target
)
1987 /* be certain SRST is off */
1988 adapter_deassert_reset();
1990 if (!target_was_examined(target
))
1993 retval
= aarch64_init_debug_access(target
);
1994 if (retval
!= ERROR_OK
)
1997 retval
= aarch64_poll(target
);
1998 if (retval
!= ERROR_OK
)
2001 if (target
->reset_halt
) {
2002 /* clear pending Reset Catch debug event */
2003 retval
= aarch64_clear_reset_catch(target
);
2004 if (retval
!= ERROR_OK
)
2005 LOG_WARNING("%s: Clearing Reset Catch debug event failed",
2006 target_name(target
));
2008 /* disable Reset Catch debug event */
2009 retval
= aarch64_enable_reset_catch(target
, false);
2010 if (retval
!= ERROR_OK
)
2011 LOG_WARNING("%s: Disabling Reset Catch debug event failed",
2012 target_name(target
));
2014 if (target
->state
!= TARGET_HALTED
) {
2015 LOG_WARNING("%s: ran after reset and before halt ...",
2016 target_name(target
));
2017 if (target_was_examined(target
)) {
2018 retval
= aarch64_halt_one(target
, HALT_LAZY
);
2019 if (retval
!= ERROR_OK
)
2022 target
->state
= TARGET_UNKNOWN
;
2030 static int aarch64_write_cpu_memory_slow(struct target
*target
,
2031 uint32_t size
, uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
2033 struct armv8_common
*armv8
= target_to_armv8(target
);
2034 struct arm_dpm
*dpm
= &armv8
->dpm
;
2035 struct arm
*arm
= &armv8
->arm
;
2038 armv8_reg_current(arm
, 1)->dirty
= true;
2040 /* change DCC to normal mode if necessary */
2041 if (*dscr
& DSCR_MA
) {
2043 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2044 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2045 if (retval
!= ERROR_OK
)
2050 uint32_t data
, opcode
;
2052 /* write the data to store into DTRRX */
2056 data
= target_buffer_get_u16(target
, buffer
);
2058 data
= target_buffer_get_u32(target
, buffer
);
2059 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2060 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
2061 if (retval
!= ERROR_OK
)
2064 if (arm
->core_state
== ARM_STATE_AARCH64
)
2065 retval
= dpm
->instr_execute(dpm
, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0
, 1));
2067 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
2068 if (retval
!= ERROR_OK
)
2072 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRB_IP
);
2074 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRH_IP
);
2076 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRW_IP
);
2077 retval
= dpm
->instr_execute(dpm
, opcode
);
2078 if (retval
!= ERROR_OK
)
2089 static int aarch64_write_cpu_memory_fast(struct target
*target
,
2090 uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
2092 struct armv8_common
*armv8
= target_to_armv8(target
);
2093 struct arm
*arm
= &armv8
->arm
;
2096 armv8_reg_current(arm
, 1)->dirty
= true;
2098 /* Step 1.d - Change DCC to memory mode */
2100 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2101 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2102 if (retval
!= ERROR_OK
)
2106 /* Step 2.a - Do the write */
2107 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
2108 buffer
, 4, count
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
2109 if (retval
!= ERROR_OK
)
2112 /* Step 3.a - Switch DTR mode back to Normal mode */
2114 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2115 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2116 if (retval
!= ERROR_OK
)
2122 static int aarch64_write_cpu_memory(struct target
*target
,
2123 uint64_t address
, uint32_t size
,
2124 uint32_t count
, const uint8_t *buffer
)
2126 /* write memory through APB-AP */
2127 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2128 struct armv8_common
*armv8
= target_to_armv8(target
);
2129 struct arm_dpm
*dpm
= &armv8
->dpm
;
2130 struct arm
*arm
= &armv8
->arm
;
2133 if (target
->state
!= TARGET_HALTED
) {
2134 LOG_WARNING("target not halted");
2135 return ERROR_TARGET_NOT_HALTED
;
2138 /* Mark register X0 as dirty, as it will be used
2139 * for transferring the data.
2140 * It will be restored automatically when exiting
2143 armv8_reg_current(arm
, 0)->dirty
= true;
2145 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2148 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2149 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2150 if (retval
!= ERROR_OK
)
2153 /* Set Normal access mode */
2154 dscr
= (dscr
& ~DSCR_MA
);
2155 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2156 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2157 if (retval
!= ERROR_OK
)
2160 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2161 /* Write X0 with value 'address' using write procedure */
2162 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2163 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2164 retval
= dpm
->instr_write_data_dcc_64(dpm
,
2165 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
2167 /* Write R0 with value 'address' using write procedure */
2168 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
2169 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2170 retval
= dpm
->instr_write_data_dcc(dpm
,
2171 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
2174 if (retval
!= ERROR_OK
)
2177 if (size
== 4 && (address
% 4) == 0)
2178 retval
= aarch64_write_cpu_memory_fast(target
, count
, buffer
, &dscr
);
2180 retval
= aarch64_write_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
2182 if (retval
!= ERROR_OK
) {
2183 /* Unset DTR mode */
2184 mem_ap_read_atomic_u32(armv8
->debug_ap
,
2185 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2187 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2188 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2191 /* Check for sticky abort flags in the DSCR */
2192 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2193 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2194 if (retval
!= ERROR_OK
)
2198 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
2199 /* Abort occurred - clear it and exit */
2200 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
2201 armv8_dpm_handle_exception(dpm
, true);
2209 static int aarch64_read_cpu_memory_slow(struct target
*target
,
2210 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
2212 struct armv8_common
*armv8
= target_to_armv8(target
);
2213 struct arm_dpm
*dpm
= &armv8
->dpm
;
2214 struct arm
*arm
= &armv8
->arm
;
2217 armv8_reg_current(arm
, 1)->dirty
= true;
2219 /* change DCC to normal mode (if necessary) */
2220 if (*dscr
& DSCR_MA
) {
2222 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2223 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2224 if (retval
!= ERROR_OK
)
2229 uint32_t opcode
, data
;
2232 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRB_IP
);
2234 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRH_IP
);
2236 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRW_IP
);
2237 retval
= dpm
->instr_execute(dpm
, opcode
);
2238 if (retval
!= ERROR_OK
)
2241 if (arm
->core_state
== ARM_STATE_AARCH64
)
2242 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0
, 1));
2244 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
2245 if (retval
!= ERROR_OK
)
2248 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2249 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &data
);
2250 if (retval
!= ERROR_OK
)
2254 *buffer
= (uint8_t)data
;
2256 target_buffer_set_u16(target
, buffer
, (uint16_t)data
);
2258 target_buffer_set_u32(target
, buffer
, data
);
2268 static int aarch64_read_cpu_memory_fast(struct target
*target
,
2269 uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
2271 struct armv8_common
*armv8
= target_to_armv8(target
);
2272 struct arm_dpm
*dpm
= &armv8
->dpm
;
2273 struct arm
*arm
= &armv8
->arm
;
2277 /* Mark X1 as dirty */
2278 armv8_reg_current(arm
, 1)->dirty
= true;
2280 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2281 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2282 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0));
2284 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2285 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
2288 if (retval
!= ERROR_OK
)
2291 /* Step 1.e - Change DCC to memory mode */
2293 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2294 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2295 if (retval
!= ERROR_OK
)
2298 /* Step 1.f - read DBGDTRTX and discard the value */
2299 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2300 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2301 if (retval
!= ERROR_OK
)
2305 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
2306 * Abort flags are sticky, so can be read at end of transactions
2308 * This data is read in aligned to 32 bit boundary.
2312 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
2313 * increments X0 by 4. */
2314 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, buffer
, 4, count
,
2315 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
2316 if (retval
!= ERROR_OK
)
2320 /* Step 3.a - set DTR access mode back to Normal mode */
2322 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2323 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2324 if (retval
!= ERROR_OK
)
2327 /* Step 3.b - read DBGDTRTX for the final value */
2328 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2329 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2330 if (retval
!= ERROR_OK
)
2333 target_buffer_set_u32(target
, buffer
+ count
* 4, value
);
2337 static int aarch64_read_cpu_memory(struct target
*target
,
2338 target_addr_t address
, uint32_t size
,
2339 uint32_t count
, uint8_t *buffer
)
2341 /* read memory through APB-AP */
2342 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2343 struct armv8_common
*armv8
= target_to_armv8(target
);
2344 struct arm_dpm
*dpm
= &armv8
->dpm
;
2345 struct arm
*arm
= &armv8
->arm
;
2348 LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64
" size %" PRIu32
" count %" PRIu32
,
2349 address
, size
, count
);
2351 if (target
->state
!= TARGET_HALTED
) {
2352 LOG_WARNING("target not halted");
2353 return ERROR_TARGET_NOT_HALTED
;
2356 /* Mark register X0 as dirty, as it will be used
2357 * for transferring the data.
2358 * It will be restored automatically when exiting
2361 armv8_reg_current(arm
, 0)->dirty
= true;
2364 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2365 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2366 if (retval
!= ERROR_OK
)
2369 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2371 /* Set Normal access mode */
2373 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2374 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2375 if (retval
!= ERROR_OK
)
2378 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2379 /* Write X0 with value 'address' using write procedure */
2380 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2381 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2382 retval
= dpm
->instr_write_data_dcc_64(dpm
,
2383 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
2385 /* Write R0 with value 'address' using write procedure */
2386 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2387 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2388 retval
= dpm
->instr_write_data_dcc(dpm
,
2389 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
2392 if (retval
!= ERROR_OK
)
2395 if (size
== 4 && (address
% 4) == 0)
2396 retval
= aarch64_read_cpu_memory_fast(target
, count
, buffer
, &dscr
);
2398 retval
= aarch64_read_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
2400 if (dscr
& DSCR_MA
) {
2402 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2403 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2406 if (retval
!= ERROR_OK
)
2409 /* Check for sticky abort flags in the DSCR */
2410 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2411 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2412 if (retval
!= ERROR_OK
)
2417 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
2418 /* Abort occurred - clear it and exit */
2419 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
2420 armv8_dpm_handle_exception(dpm
, true);
2428 static int aarch64_read_phys_memory(struct target
*target
,
2429 target_addr_t address
, uint32_t size
,
2430 uint32_t count
, uint8_t *buffer
)
2432 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2434 if (count
&& buffer
) {
2435 /* read memory through APB-AP */
2436 retval
= aarch64_mmu_modify(target
, 0);
2437 if (retval
!= ERROR_OK
)
2439 retval
= aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2444 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
2445 uint32_t size
, uint32_t count
, uint8_t *buffer
)
2447 int mmu_enabled
= 0;
2450 /* determine if MMU was enabled on target stop */
2451 retval
= aarch64_mmu(target
, &mmu_enabled
);
2452 if (retval
!= ERROR_OK
)
2456 /* enable MMU as we could have disabled it for phys access */
2457 retval
= aarch64_mmu_modify(target
, 1);
2458 if (retval
!= ERROR_OK
)
2461 return aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2464 static int aarch64_write_phys_memory(struct target
*target
,
2465 target_addr_t address
, uint32_t size
,
2466 uint32_t count
, const uint8_t *buffer
)
2468 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2470 if (count
&& buffer
) {
2471 /* write memory through APB-AP */
2472 retval
= aarch64_mmu_modify(target
, 0);
2473 if (retval
!= ERROR_OK
)
2475 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2481 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
2482 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
2484 int mmu_enabled
= 0;
2487 /* determine if MMU was enabled on target stop */
2488 retval
= aarch64_mmu(target
, &mmu_enabled
);
2489 if (retval
!= ERROR_OK
)
2493 /* enable MMU as we could have disabled it for phys access */
2494 retval
= aarch64_mmu_modify(target
, 1);
2495 if (retval
!= ERROR_OK
)
2498 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2501 static int aarch64_handle_target_request(void *priv
)
2503 struct target
*target
= priv
;
2504 struct armv8_common
*armv8
= target_to_armv8(target
);
2507 if (!target_was_examined(target
))
2509 if (!target
->dbg_msg_enabled
)
2512 if (target
->state
== TARGET_RUNNING
) {
2515 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2516 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2518 /* check if we have data */
2519 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
2520 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2521 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
2522 if (retval
== ERROR_OK
) {
2523 target_request(target
, request
);
2524 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2525 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2533 static int aarch64_examine_first(struct target
*target
)
2535 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2536 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2537 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2538 struct aarch64_private_config
*pc
= target
->private_config
;
2540 int retval
= ERROR_OK
;
2541 uint64_t debug
, ttypr
;
2543 uint32_t tmp0
, tmp1
, tmp2
, tmp3
;
2544 debug
= ttypr
= cpuid
= 0;
2549 if (armv8
->debug_ap
) {
2550 dap_put_ap(armv8
->debug_ap
);
2551 armv8
->debug_ap
= NULL
;
2554 if (pc
->adiv5_config
.ap_num
== DP_APSEL_INVALID
) {
2555 /* Search for the APB-AB */
2556 retval
= dap_find_get_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
2557 if (retval
!= ERROR_OK
) {
2558 LOG_ERROR("Could not find APB-AP for debug access");
2562 armv8
->debug_ap
= dap_get_ap(swjdp
, pc
->adiv5_config
.ap_num
);
2563 if (!armv8
->debug_ap
) {
2564 LOG_ERROR("Cannot get AP");
2569 retval
= mem_ap_init(armv8
->debug_ap
);
2570 if (retval
!= ERROR_OK
) {
2571 LOG_ERROR("Could not initialize the APB-AP");
2575 armv8
->debug_ap
->memaccess_tck
= 10;
2577 if (!target
->dbgbase_set
) {
2578 /* Lookup Processor DAP */
2579 retval
= dap_lookup_cs_component(armv8
->debug_ap
, ARM_CS_C9_DEVTYPE_CORE_DEBUG
,
2580 &armv8
->debug_base
, target
->coreid
);
2581 if (retval
!= ERROR_OK
)
2583 LOG_DEBUG("Detected core %" PRId32
" dbgbase: " TARGET_ADDR_FMT
,
2584 target
->coreid
, armv8
->debug_base
);
2586 armv8
->debug_base
= target
->dbgbase
;
2588 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2589 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
2590 if (retval
!= ERROR_OK
) {
2591 LOG_DEBUG("Examine %s failed", "oslock");
2595 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2596 armv8
->debug_base
+ CPUV8_DBG_MAINID0
, &cpuid
);
2597 if (retval
!= ERROR_OK
) {
2598 LOG_DEBUG("Examine %s failed", "CPUID");
2602 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2603 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
, &tmp0
);
2604 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2605 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
+ 4, &tmp1
);
2606 if (retval
!= ERROR_OK
) {
2607 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2610 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2611 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
, &tmp2
);
2612 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2613 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
+ 4, &tmp3
);
2614 if (retval
!= ERROR_OK
) {
2615 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2619 retval
= dap_run(armv8
->debug_ap
->dap
);
2620 if (retval
!= ERROR_OK
) {
2621 LOG_ERROR("%s: examination failed\n", target_name(target
));
2626 ttypr
= (ttypr
<< 32) | tmp0
;
2628 debug
= (debug
<< 32) | tmp2
;
2630 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
2631 LOG_DEBUG("ttypr = 0x%08" PRIx64
, ttypr
);
2632 LOG_DEBUG("debug = 0x%08" PRIx64
, debug
);
2635 LOG_TARGET_ERROR(target
, "CTI not specified");
2639 armv8
->cti
= pc
->cti
;
2641 retval
= aarch64_dpm_setup(aarch64
, debug
);
2642 if (retval
!= ERROR_OK
)
2645 /* Setup Breakpoint Register Pairs */
2646 aarch64
->brp_num
= (uint32_t)((debug
>> 12) & 0x0F) + 1;
2647 aarch64
->brp_num_context
= (uint32_t)((debug
>> 28) & 0x0F) + 1;
2648 aarch64
->brp_num_available
= aarch64
->brp_num
;
2649 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
2650 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
2651 aarch64
->brp_list
[i
].used
= 0;
2652 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
2653 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
2655 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
2656 aarch64
->brp_list
[i
].value
= 0;
2657 aarch64
->brp_list
[i
].control
= 0;
2658 aarch64
->brp_list
[i
].brpn
= i
;
2661 /* Setup Watchpoint Register Pairs */
2662 aarch64
->wp_num
= (uint32_t)((debug
>> 20) & 0x0F) + 1;
2663 aarch64
->wp_num_available
= aarch64
->wp_num
;
2664 aarch64
->wp_list
= calloc(aarch64
->wp_num
, sizeof(struct aarch64_brp
));
2665 for (i
= 0; i
< aarch64
->wp_num
; i
++) {
2666 aarch64
->wp_list
[i
].used
= 0;
2667 aarch64
->wp_list
[i
].type
= BRP_NORMAL
;
2668 aarch64
->wp_list
[i
].value
= 0;
2669 aarch64
->wp_list
[i
].control
= 0;
2670 aarch64
->wp_list
[i
].brpn
= i
;
2673 LOG_DEBUG("Configured %i hw breakpoints, %i watchpoints",
2674 aarch64
->brp_num
, aarch64
->wp_num
);
2676 target
->state
= TARGET_UNKNOWN
;
2677 target
->debug_reason
= DBG_REASON_NOTHALTED
;
2678 aarch64
->isrmasking_mode
= AARCH64_ISRMASK_ON
;
2679 target_set_examined(target
);
2683 static int aarch64_examine(struct target
*target
)
2685 int retval
= ERROR_OK
;
2687 /* don't re-probe hardware after each reset */
2688 if (!target_was_examined(target
))
2689 retval
= aarch64_examine_first(target
);
2691 /* Configure core debug access */
2692 if (retval
== ERROR_OK
)
2693 retval
= aarch64_init_debug_access(target
);
2699 * Cortex-A8 target creation and initialization
2702 static int aarch64_init_target(struct command_context
*cmd_ctx
,
2703 struct target
*target
)
2705 /* examine_first() does a bunch of this */
2706 arm_semihosting_init(target
);
2710 static int aarch64_init_arch_info(struct target
*target
,
2711 struct aarch64_common
*aarch64
, struct adiv5_dap
*dap
)
2713 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2715 /* Setup struct aarch64_common */
2716 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
2717 armv8
->arm
.dap
= dap
;
2719 /* register arch-specific functions */
2720 armv8
->examine_debug_reason
= NULL
;
2721 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
2722 armv8
->pre_restore_context
= NULL
;
2723 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
2725 armv8_init_arch_info(target
, armv8
);
2726 target_register_timer_callback(aarch64_handle_target_request
, 1,
2727 TARGET_TIMER_TYPE_PERIODIC
, target
);
2732 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
2734 struct aarch64_private_config
*pc
= target
->private_config
;
2735 struct aarch64_common
*aarch64
;
2737 if (adiv5_verify_config(&pc
->adiv5_config
) != ERROR_OK
)
2740 aarch64
= calloc(1, sizeof(struct aarch64_common
));
2742 LOG_ERROR("Out of memory");
2746 return aarch64_init_arch_info(target
, aarch64
, pc
->adiv5_config
.dap
);
2749 static void aarch64_deinit_target(struct target
*target
)
2751 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2752 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2753 struct arm_dpm
*dpm
= &armv8
->dpm
;
2755 if (armv8
->debug_ap
)
2756 dap_put_ap(armv8
->debug_ap
);
2758 armv8_free_reg_cache(target
);
2759 free(aarch64
->brp_list
);
2762 free(target
->private_config
);
2766 static int aarch64_mmu(struct target
*target
, int *enabled
)
2768 if (target
->state
!= TARGET_HALTED
) {
2769 LOG_ERROR("%s: target %s not halted", __func__
, target_name(target
));
2770 return ERROR_TARGET_INVALID
;
2773 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
2777 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
2778 target_addr_t
*phys
)
2780 return armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
2784 * private target configuration items
2786 enum aarch64_cfg_param
{
2790 static const struct jim_nvp nvp_config_opts
[] = {
2791 { .name
= "-cti", .value
= CFG_CTI
},
2792 { .name
= NULL
, .value
= -1 }
2795 static int aarch64_jim_configure(struct target
*target
, struct jim_getopt_info
*goi
)
2797 struct aarch64_private_config
*pc
;
2801 pc
= (struct aarch64_private_config
*)target
->private_config
;
2803 pc
= calloc(1, sizeof(struct aarch64_private_config
));
2804 pc
->adiv5_config
.ap_num
= DP_APSEL_INVALID
;
2805 target
->private_config
= pc
;
2809 * Call adiv5_jim_configure() to parse the common DAP options
2810 * It will return JIM_CONTINUE if it didn't find any known
2811 * options, JIM_OK if it correctly parsed the topmost option
2812 * and JIM_ERR if an error occurred during parameter evaluation.
2813 * For JIM_CONTINUE, we check our own params.
2815 * adiv5_jim_configure() assumes 'private_config' to point to
2816 * 'struct adiv5_private_config'. Override 'private_config'!
2818 target
->private_config
= &pc
->adiv5_config
;
2819 e
= adiv5_jim_configure(target
, goi
);
2820 target
->private_config
= pc
;
2821 if (e
!= JIM_CONTINUE
)
2824 /* parse config or cget options ... */
2825 if (goi
->argc
> 0) {
2826 Jim_SetEmptyResult(goi
->interp
);
2828 /* check first if topmost item is for us */
2829 e
= jim_nvp_name2value_obj(goi
->interp
, nvp_config_opts
,
2832 return JIM_CONTINUE
;
2834 e
= jim_getopt_obj(goi
, NULL
);
2840 if (goi
->isconfigure
) {
2842 struct arm_cti
*cti
;
2843 e
= jim_getopt_obj(goi
, &o_cti
);
2846 cti
= cti_instance_by_jim_obj(goi
->interp
, o_cti
);
2848 Jim_SetResultString(goi
->interp
, "CTI name invalid!", -1);
2853 if (goi
->argc
!= 0) {
2854 Jim_WrongNumArgs(goi
->interp
,
2855 goi
->argc
, goi
->argv
,
2860 if (!pc
|| !pc
->cti
) {
2861 Jim_SetResultString(goi
->interp
, "CTI not configured", -1);
2864 Jim_SetResultString(goi
->interp
, arm_cti_name(pc
->cti
), -1);
2870 return JIM_CONTINUE
;
2877 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
2879 struct target
*target
= get_current_target(CMD_CTX
);
2880 struct armv8_common
*armv8
= target_to_armv8(target
);
2882 return armv8_handle_cache_info_command(CMD
,
2883 &armv8
->armv8_mmu
.armv8_cache
);
2886 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
2888 struct target
*target
= get_current_target(CMD_CTX
);
2889 if (!target_was_examined(target
)) {
2890 LOG_ERROR("target not examined yet");
2894 return aarch64_init_debug_access(target
);
2897 COMMAND_HANDLER(aarch64_handle_disassemble_command
)
2899 struct target
*target
= get_current_target(CMD_CTX
);
2902 LOG_ERROR("No target selected");
2906 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2908 if (aarch64
->common_magic
!= AARCH64_COMMON_MAGIC
) {
2909 command_print(CMD
, "current target isn't an AArch64");
2914 target_addr_t address
;
2918 COMMAND_PARSE_NUMBER(int, CMD_ARGV
[1], count
);
2921 COMMAND_PARSE_ADDRESS(CMD_ARGV
[0], address
);
2924 return ERROR_COMMAND_SYNTAX_ERROR
;
2927 return a64_disassemble(CMD
, target
, address
, count
);
2930 COMMAND_HANDLER(aarch64_mask_interrupts_command
)
2932 struct target
*target
= get_current_target(CMD_CTX
);
2933 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2935 static const struct jim_nvp nvp_maskisr_modes
[] = {
2936 { .name
= "off", .value
= AARCH64_ISRMASK_OFF
},
2937 { .name
= "on", .value
= AARCH64_ISRMASK_ON
},
2938 { .name
= NULL
, .value
= -1 },
2940 const struct jim_nvp
*n
;
2943 n
= jim_nvp_name2value_simple(nvp_maskisr_modes
, CMD_ARGV
[0]);
2945 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV
[0]);
2946 return ERROR_COMMAND_SYNTAX_ERROR
;
2949 aarch64
->isrmasking_mode
= n
->value
;
2952 n
= jim_nvp_value2name_simple(nvp_maskisr_modes
, aarch64
->isrmasking_mode
);
2953 command_print(CMD
, "aarch64 interrupt mask %s", n
->name
);
2958 static int jim_mcrmrc(Jim_Interp
*interp
, int argc
, Jim_Obj
* const *argv
)
2960 struct command
*c
= jim_to_command(interp
);
2961 struct command_context
*context
;
2962 struct target
*target
;
2965 bool is_mcr
= false;
2968 if (!strcmp(c
->name
, "mcr")) {
2975 context
= current_command_context(interp
);
2978 target
= get_current_target(context
);
2980 LOG_ERROR("%s: no current target", __func__
);
2983 if (!target_was_examined(target
)) {
2984 LOG_ERROR("%s: not yet examined", target_name(target
));
2988 arm
= target_to_arm(target
);
2990 LOG_ERROR("%s: not an ARM", target_name(target
));
2994 if (target
->state
!= TARGET_HALTED
)
2995 return ERROR_TARGET_NOT_HALTED
;
2997 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2998 LOG_ERROR("%s: not 32-bit arm target", target_name(target
));
3002 if (argc
!= arg_cnt
) {
3003 LOG_ERROR("%s: wrong number of arguments", __func__
);
3015 /* NOTE: parameter sequence matches ARM instruction set usage:
3016 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
3017 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
3018 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
3020 retval
= Jim_GetLong(interp
, argv
[1], &l
);
3021 if (retval
!= JIM_OK
)
3024 LOG_ERROR("%s: %s %d out of range", __func__
,
3025 "coprocessor", (int) l
);
3030 retval
= Jim_GetLong(interp
, argv
[2], &l
);
3031 if (retval
!= JIM_OK
)
3034 LOG_ERROR("%s: %s %d out of range", __func__
,
3040 retval
= Jim_GetLong(interp
, argv
[3], &l
);
3041 if (retval
!= JIM_OK
)
3044 LOG_ERROR("%s: %s %d out of range", __func__
,
3050 retval
= Jim_GetLong(interp
, argv
[4], &l
);
3051 if (retval
!= JIM_OK
)
3054 LOG_ERROR("%s: %s %d out of range", __func__
,
3060 retval
= Jim_GetLong(interp
, argv
[5], &l
);
3061 if (retval
!= JIM_OK
)
3064 LOG_ERROR("%s: %s %d out of range", __func__
,
3072 if (is_mcr
== true) {
3073 retval
= Jim_GetLong(interp
, argv
[6], &l
);
3074 if (retval
!= JIM_OK
)
3078 /* NOTE: parameters reordered! */
3079 /* ARMV4_5_MCR(cpnum, op1, 0, crn, crm, op2) */
3080 retval
= arm
->mcr(target
, cpnum
, op1
, op2
, crn
, crm
, value
);
3081 if (retval
!= ERROR_OK
)
3084 /* NOTE: parameters reordered! */
3085 /* ARMV4_5_MRC(cpnum, op1, 0, crn, crm, op2) */
3086 retval
= arm
->mrc(target
, cpnum
, op1
, op2
, crn
, crm
, &value
);
3087 if (retval
!= ERROR_OK
)
3090 Jim_SetResult(interp
, Jim_NewIntObj(interp
, value
));
3096 static const struct command_registration aarch64_exec_command_handlers
[] = {
3098 .name
= "cache_info",
3099 .handler
= aarch64_handle_cache_info_command
,
3100 .mode
= COMMAND_EXEC
,
3101 .help
= "display information about target caches",
3106 .handler
= aarch64_handle_dbginit_command
,
3107 .mode
= COMMAND_EXEC
,
3108 .help
= "Initialize core debug",
3112 .name
= "disassemble",
3113 .handler
= aarch64_handle_disassemble_command
,
3114 .mode
= COMMAND_EXEC
,
3115 .help
= "Disassemble instructions",
3116 .usage
= "address [count]",
3120 .handler
= aarch64_mask_interrupts_command
,
3121 .mode
= COMMAND_ANY
,
3122 .help
= "mask aarch64 interrupts during single-step",
3123 .usage
= "['on'|'off']",
3127 .mode
= COMMAND_EXEC
,
3128 .jim_handler
= jim_mcrmrc
,
3129 .help
= "write coprocessor register",
3130 .usage
= "cpnum op1 CRn CRm op2 value",
3134 .mode
= COMMAND_EXEC
,
3135 .jim_handler
= jim_mcrmrc
,
3136 .help
= "read coprocessor register",
3137 .usage
= "cpnum op1 CRn CRm op2",
3140 .chain
= smp_command_handlers
,
3144 COMMAND_REGISTRATION_DONE
3147 static const struct command_registration aarch64_command_handlers
[] = {
3150 .mode
= COMMAND_ANY
,
3151 .help
= "ARM Command Group",
3153 .chain
= semihosting_common_handlers
3156 .chain
= armv8_command_handlers
,
3160 .mode
= COMMAND_ANY
,
3161 .help
= "Aarch64 command group",
3163 .chain
= aarch64_exec_command_handlers
,
3165 COMMAND_REGISTRATION_DONE
3168 struct target_type aarch64_target
= {
3171 .poll
= aarch64_poll
,
3172 .arch_state
= armv8_arch_state
,
3174 .halt
= aarch64_halt
,
3175 .resume
= aarch64_resume
,
3176 .step
= aarch64_step
,
3178 .assert_reset
= aarch64_assert_reset
,
3179 .deassert_reset
= aarch64_deassert_reset
,
3181 /* REVISIT allow exporting VFP3 registers ... */
3182 .get_gdb_arch
= armv8_get_gdb_arch
,
3183 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
3185 .read_memory
= aarch64_read_memory
,
3186 .write_memory
= aarch64_write_memory
,
3188 .add_breakpoint
= aarch64_add_breakpoint
,
3189 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
3190 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
3191 .remove_breakpoint
= aarch64_remove_breakpoint
,
3192 .add_watchpoint
= aarch64_add_watchpoint
,
3193 .remove_watchpoint
= aarch64_remove_watchpoint
,
3194 .hit_watchpoint
= aarch64_hit_watchpoint
,
3196 .commands
= aarch64_command_handlers
,
3197 .target_create
= aarch64_target_create
,
3198 .target_jim_configure
= aarch64_jim_configure
,
3199 .init_target
= aarch64_init_target
,
3200 .deinit_target
= aarch64_deinit_target
,
3201 .examine
= aarch64_examine
,
3203 .read_phys_memory
= aarch64_read_phys_memory
,
3204 .write_phys_memory
= aarch64_write_phys_memory
,
3206 .virt2phys
= aarch64_virt2phys
,
Linking to an existing account
If you already have an account and want to add another login method,
you MUST first sign in with your existing account, then change the URL to
https://review.openocd.org/login/?link
to reach this page again; this time it will link the new method. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)