1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
26 #include "a64_disassembler.h"
28 #include "target_request.h"
29 #include "target_type.h"
30 #include "armv8_opcodes.h"
31 #include "armv8_cache.h"
32 #include "arm_semihosting.h"
33 #include "jtag/interface.h"
35 #include <helper/time_support.h>
47 struct aarch64_private_config
{
48 struct adiv5_private_config adiv5_config
;
52 static int aarch64_poll(struct target
*target
);
53 static int aarch64_debug_entry(struct target
*target
);
54 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
55 static int aarch64_set_breakpoint(struct target
*target
,
56 struct breakpoint
*breakpoint
, uint8_t matchmode
);
57 static int aarch64_set_context_breakpoint(struct target
*target
,
58 struct breakpoint
*breakpoint
, uint8_t matchmode
);
59 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
60 struct breakpoint
*breakpoint
);
61 static int aarch64_unset_breakpoint(struct target
*target
,
62 struct breakpoint
*breakpoint
);
63 static int aarch64_mmu(struct target
*target
, int *enabled
);
64 static int aarch64_virt2phys(struct target
*target
,
65 target_addr_t virt
, target_addr_t
*phys
);
66 static int aarch64_read_cpu_memory(struct target
*target
,
67 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
69 static int aarch64_restore_system_control_reg(struct target
*target
)
71 enum arm_mode target_mode
= ARM_MODE_ANY
;
72 int retval
= ERROR_OK
;
75 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
76 struct armv8_common
*armv8
= target_to_armv8(target
);
78 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
79 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
80 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
82 switch (armv8
->arm
.core_mode
) {
84 target_mode
= ARMV8_64_EL1H
;
88 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
92 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
96 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
105 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
109 LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
110 armv8_mode_name(armv8
->arm
.core_mode
), armv8
->arm
.core_mode
);
114 if (target_mode
!= ARM_MODE_ANY
)
115 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
117 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
, aarch64
->system_control_reg
);
118 if (retval
!= ERROR_OK
)
121 if (target_mode
!= ARM_MODE_ANY
)
122 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
128 /* modify system_control_reg in order to enable or disable mmu for :
129 * - virt2phys address conversion
130 * - read or write memory in phys or virt address */
131 static int aarch64_mmu_modify(struct target
*target
, int enable
)
133 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
134 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
135 int retval
= ERROR_OK
;
136 enum arm_mode target_mode
= ARM_MODE_ANY
;
140 /* if mmu enabled at target stop and mmu not enable */
141 if (!(aarch64
->system_control_reg
& 0x1U
)) {
142 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
145 if (!(aarch64
->system_control_reg_curr
& 0x1U
))
146 aarch64
->system_control_reg_curr
|= 0x1U
;
148 if (aarch64
->system_control_reg_curr
& 0x4U
) {
149 /* data cache is active */
150 aarch64
->system_control_reg_curr
&= ~0x4U
;
151 /* flush data cache armv8 function to be called */
152 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
153 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
155 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
156 aarch64
->system_control_reg_curr
&= ~0x1U
;
160 switch (armv8
->arm
.core_mode
) {
162 target_mode
= ARMV8_64_EL1H
;
166 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
170 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
174 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
183 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
187 LOG_DEBUG("unknown cpu state 0x%x", armv8
->arm
.core_mode
);
190 if (target_mode
!= ARM_MODE_ANY
)
191 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
193 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
,
194 aarch64
->system_control_reg_curr
);
196 if (target_mode
!= ARM_MODE_ANY
)
197 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
203 * Basic debug access, very low level assumes state is saved
205 static int aarch64_init_debug_access(struct target
*target
)
207 struct armv8_common
*armv8
= target_to_armv8(target
);
211 LOG_DEBUG("%s", target_name(target
));
213 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
214 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
215 if (retval
!= ERROR_OK
) {
216 LOG_DEBUG("Examine %s failed", "oslock");
220 /* Clear Sticky Power Down status Bit in PRSR to enable access to
221 the registers in the Core Power Domain */
222 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
223 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
224 if (retval
!= ERROR_OK
)
228 * Static CTI configuration:
229 * Channel 0 -> trigger outputs HALT request to PE
230 * Channel 1 -> trigger outputs Resume request to PE
231 * Gate all channel trigger events from entering the CTM
235 retval
= arm_cti_enable(armv8
->cti
, true);
236 /* By default, gate all channel events to and from the CTM */
237 if (retval
== ERROR_OK
)
238 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
239 /* output halt requests to PE on channel 0 event */
240 if (retval
== ERROR_OK
)
241 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN0
, CTI_CHNL(0));
242 /* output restart requests to PE on channel 1 event */
243 if (retval
== ERROR_OK
)
244 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN1
, CTI_CHNL(1));
245 if (retval
!= ERROR_OK
)
248 /* Resync breakpoint registers */
253 /* Write to memory mapped registers directly with no cache or mmu handling */
254 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
259 struct armv8_common
*armv8
= target_to_armv8(target
);
261 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
266 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint64_t debug
)
268 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
271 dpm
->arm
= &a8
->armv8_common
.arm
;
274 retval
= armv8_dpm_setup(dpm
);
275 if (retval
== ERROR_OK
)
276 retval
= armv8_dpm_initialize(dpm
);
281 static int aarch64_set_dscr_bits(struct target
*target
, unsigned long bit_mask
, unsigned long value
)
283 struct armv8_common
*armv8
= target_to_armv8(target
);
284 return armv8_set_dbgreg_bits(armv8
, CPUV8_DBG_DSCR
, bit_mask
, value
);
287 static int aarch64_check_state_one(struct target
*target
,
288 uint32_t mask
, uint32_t val
, int *p_result
, uint32_t *p_prsr
)
290 struct armv8_common
*armv8
= target_to_armv8(target
);
294 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
295 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &prsr
);
296 if (retval
!= ERROR_OK
)
303 *p_result
= (prsr
& mask
) == (val
& mask
);
308 static int aarch64_wait_halt_one(struct target
*target
)
310 int retval
= ERROR_OK
;
313 int64_t then
= timeval_ms();
317 retval
= aarch64_check_state_one(target
, PRSR_HALT
, PRSR_HALT
, &halted
, &prsr
);
318 if (retval
!= ERROR_OK
|| halted
)
321 if (timeval_ms() > then
+ 1000) {
322 retval
= ERROR_TARGET_TIMEOUT
;
323 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32
, target_name(target
), prsr
);
330 static int aarch64_prepare_halt_smp(struct target
*target
, bool exc_target
, struct target
**p_first
)
332 int retval
= ERROR_OK
;
333 struct target_list
*head
= target
->head
;
334 struct target
*first
= NULL
;
336 LOG_DEBUG("target %s exc %i", target_name(target
), exc_target
);
338 while (head
!= NULL
) {
339 struct target
*curr
= head
->target
;
340 struct armv8_common
*armv8
= target_to_armv8(curr
);
343 if (exc_target
&& curr
== target
)
345 if (!target_was_examined(curr
))
347 if (curr
->state
!= TARGET_RUNNING
)
350 /* HACK: mark this target as prepared for halting */
351 curr
->debug_reason
= DBG_REASON_DBGRQ
;
353 /* open the gate for channel 0 to let HALT requests pass to the CTM */
354 retval
= arm_cti_ungate_channel(armv8
->cti
, 0);
355 if (retval
== ERROR_OK
)
356 retval
= aarch64_set_dscr_bits(curr
, DSCR_HDE
, DSCR_HDE
);
357 if (retval
!= ERROR_OK
)
360 LOG_DEBUG("target %s prepared", target_name(curr
));
367 if (exc_target
&& first
)
376 static int aarch64_halt_one(struct target
*target
, enum halt_mode mode
)
378 int retval
= ERROR_OK
;
379 struct armv8_common
*armv8
= target_to_armv8(target
);
381 LOG_DEBUG("%s", target_name(target
));
383 /* allow Halting Debug Mode */
384 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
385 if (retval
!= ERROR_OK
)
388 /* trigger an event on channel 0, this outputs a halt request to the PE */
389 retval
= arm_cti_pulse_channel(armv8
->cti
, 0);
390 if (retval
!= ERROR_OK
)
393 if (mode
== HALT_SYNC
) {
394 retval
= aarch64_wait_halt_one(target
);
395 if (retval
!= ERROR_OK
) {
396 if (retval
== ERROR_TARGET_TIMEOUT
)
397 LOG_ERROR("Timeout waiting for target %s halt", target_name(target
));
405 static int aarch64_halt_smp(struct target
*target
, bool exc_target
)
407 struct target
*next
= target
;
410 /* prepare halt on all PEs of the group */
411 retval
= aarch64_prepare_halt_smp(target
, exc_target
, &next
);
413 if (exc_target
&& next
== target
)
416 /* halt the target PE */
417 if (retval
== ERROR_OK
)
418 retval
= aarch64_halt_one(next
, HALT_LAZY
);
420 if (retval
!= ERROR_OK
)
423 /* wait for all PEs to halt */
424 int64_t then
= timeval_ms();
426 bool all_halted
= true;
427 struct target_list
*head
;
430 foreach_smp_target(head
, target
->head
) {
435 if (!target_was_examined(curr
))
438 retval
= aarch64_check_state_one(curr
, PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
439 if (retval
!= ERROR_OK
|| !halted
) {
448 if (timeval_ms() > then
+ 1000) {
449 retval
= ERROR_TARGET_TIMEOUT
;
454 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
455 * and it looks like the CTI's are not connected by a common
456 * trigger matrix. It seems that we need to halt one core in each
457 * cluster explicitly. So if we find that a core has not halted
458 * yet, we trigger an explicit halt for the second cluster.
460 retval
= aarch64_halt_one(curr
, HALT_LAZY
);
461 if (retval
!= ERROR_OK
)
468 static int update_halt_gdb(struct target
*target
, enum target_debug_reason debug_reason
)
470 struct target
*gdb_target
= NULL
;
471 struct target_list
*head
;
474 if (debug_reason
== DBG_REASON_NOTHALTED
) {
475 LOG_DEBUG("Halting remaining targets in SMP group");
476 aarch64_halt_smp(target
, true);
479 /* poll all targets in the group, but skip the target that serves GDB */
480 foreach_smp_target(head
, target
->head
) {
482 /* skip calling context */
485 if (!target_was_examined(curr
))
487 /* skip targets that were already halted */
488 if (curr
->state
== TARGET_HALTED
)
490 /* remember the gdb_service->target */
491 if (curr
->gdb_service
!= NULL
)
492 gdb_target
= curr
->gdb_service
->target
;
494 if (curr
== gdb_target
)
497 /* avoid recursion in aarch64_poll() */
503 /* after all targets were updated, poll the gdb serving target */
504 if (gdb_target
!= NULL
&& gdb_target
!= target
)
505 aarch64_poll(gdb_target
);
511 * Aarch64 Run control
514 static int aarch64_poll(struct target
*target
)
516 enum target_state prev_target_state
;
517 int retval
= ERROR_OK
;
520 retval
= aarch64_check_state_one(target
,
521 PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
522 if (retval
!= ERROR_OK
)
526 prev_target_state
= target
->state
;
527 if (prev_target_state
!= TARGET_HALTED
) {
528 enum target_debug_reason debug_reason
= target
->debug_reason
;
530 /* We have a halting debug event */
531 target
->state
= TARGET_HALTED
;
532 LOG_DEBUG("Target %s halted", target_name(target
));
533 retval
= aarch64_debug_entry(target
);
534 if (retval
!= ERROR_OK
)
538 update_halt_gdb(target
, debug_reason
);
540 if (arm_semihosting(target
, &retval
) != 0)
543 switch (prev_target_state
) {
547 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
549 case TARGET_DEBUG_RUNNING
:
550 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_HALTED
);
557 target
->state
= TARGET_RUNNING
;
562 static int aarch64_halt(struct target
*target
)
564 struct armv8_common
*armv8
= target_to_armv8(target
);
565 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_HALT
;
568 return aarch64_halt_smp(target
, false);
570 return aarch64_halt_one(target
, HALT_SYNC
);
573 static int aarch64_restore_one(struct target
*target
, int current
,
574 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
576 struct armv8_common
*armv8
= target_to_armv8(target
);
577 struct arm
*arm
= &armv8
->arm
;
581 LOG_DEBUG("%s", target_name(target
));
583 if (!debug_execution
)
584 target_free_all_working_areas(target
);
586 /* current = 1: continue on current pc, otherwise continue at <address> */
587 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
589 resume_pc
= *address
;
591 *address
= resume_pc
;
593 /* Make sure that the Armv7 gdb thumb fixups does not
594 * kill the return address
596 switch (arm
->core_state
) {
598 resume_pc
&= 0xFFFFFFFC;
600 case ARM_STATE_AARCH64
:
601 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
603 case ARM_STATE_THUMB
:
604 case ARM_STATE_THUMB_EE
:
605 /* When the return address is loaded into PC
606 * bit 0 must be 1 to stay in Thumb state
610 case ARM_STATE_JAZELLE
:
611 LOG_ERROR("How do I resume into Jazelle state??");
614 LOG_DEBUG("resume pc = 0x%016" PRIx64
, resume_pc
);
615 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
616 arm
->pc
->dirty
= true;
617 arm
->pc
->valid
= true;
619 /* called it now before restoring context because it uses cpu
620 * register r0 for restoring system control register */
621 retval
= aarch64_restore_system_control_reg(target
);
622 if (retval
== ERROR_OK
)
623 retval
= aarch64_restore_context(target
, handle_breakpoints
);
629 * prepare single target for restart
633 static int aarch64_prepare_restart_one(struct target
*target
)
635 struct armv8_common
*armv8
= target_to_armv8(target
);
640 LOG_DEBUG("%s", target_name(target
));
642 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
643 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
644 if (retval
!= ERROR_OK
)
647 if ((dscr
& DSCR_ITE
) == 0)
648 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
649 if ((dscr
& DSCR_ERR
) != 0)
650 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
652 /* acknowledge a pending CTI halt event */
653 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
655 * open the CTI gate for channel 1 so that the restart events
656 * get passed along to all PEs. Also close gate for channel 0
657 * to isolate the PE from halt events.
659 if (retval
== ERROR_OK
)
660 retval
= arm_cti_ungate_channel(armv8
->cti
, 1);
661 if (retval
== ERROR_OK
)
662 retval
= arm_cti_gate_channel(armv8
->cti
, 0);
664 /* make sure that DSCR.HDE is set */
665 if (retval
== ERROR_OK
) {
667 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
668 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
671 if (retval
== ERROR_OK
) {
672 /* clear sticky bits in PRSR, SDR is now 0 */
673 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
674 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &tmp
);
680 static int aarch64_do_restart_one(struct target
*target
, enum restart_mode mode
)
682 struct armv8_common
*armv8
= target_to_armv8(target
);
685 LOG_DEBUG("%s", target_name(target
));
687 /* trigger an event on channel 1, generates a restart request to the PE */
688 retval
= arm_cti_pulse_channel(armv8
->cti
, 1);
689 if (retval
!= ERROR_OK
)
692 if (mode
== RESTART_SYNC
) {
693 int64_t then
= timeval_ms();
697 * if PRSR.SDR is set now, the target did restart, even
698 * if it's now already halted again (e.g. due to breakpoint)
700 retval
= aarch64_check_state_one(target
,
701 PRSR_SDR
, PRSR_SDR
, &resumed
, NULL
);
702 if (retval
!= ERROR_OK
|| resumed
)
705 if (timeval_ms() > then
+ 1000) {
706 LOG_ERROR("%s: Timeout waiting for resume"PRIx32
, target_name(target
));
707 retval
= ERROR_TARGET_TIMEOUT
;
713 if (retval
!= ERROR_OK
)
716 target
->debug_reason
= DBG_REASON_NOTHALTED
;
717 target
->state
= TARGET_RUNNING
;
722 static int aarch64_restart_one(struct target
*target
, enum restart_mode mode
)
726 LOG_DEBUG("%s", target_name(target
));
728 retval
= aarch64_prepare_restart_one(target
);
729 if (retval
== ERROR_OK
)
730 retval
= aarch64_do_restart_one(target
, mode
);
736 * prepare all but the current target for restart
738 static int aarch64_prep_restart_smp(struct target
*target
, int handle_breakpoints
, struct target
**p_first
)
740 int retval
= ERROR_OK
;
741 struct target_list
*head
;
742 struct target
*first
= NULL
;
745 foreach_smp_target(head
, target
->head
) {
746 struct target
*curr
= head
->target
;
748 /* skip calling target */
751 if (!target_was_examined(curr
))
753 if (curr
->state
!= TARGET_HALTED
)
756 /* resume at current address, not in step mode */
757 retval
= aarch64_restore_one(curr
, 1, &address
, handle_breakpoints
, 0);
758 if (retval
== ERROR_OK
)
759 retval
= aarch64_prepare_restart_one(curr
);
760 if (retval
!= ERROR_OK
) {
761 LOG_ERROR("failed to restore target %s", target_name(curr
));
764 /* remember the first valid target in the group */
776 static int aarch64_step_restart_smp(struct target
*target
)
778 int retval
= ERROR_OK
;
779 struct target_list
*head
;
780 struct target
*first
= NULL
;
782 LOG_DEBUG("%s", target_name(target
));
784 retval
= aarch64_prep_restart_smp(target
, 0, &first
);
785 if (retval
!= ERROR_OK
)
789 retval
= aarch64_do_restart_one(first
, RESTART_LAZY
);
790 if (retval
!= ERROR_OK
) {
791 LOG_DEBUG("error restarting target %s", target_name(first
));
795 int64_t then
= timeval_ms();
797 struct target
*curr
= target
;
798 bool all_resumed
= true;
800 foreach_smp_target(head
, target
->head
) {
809 if (!target_was_examined(curr
))
812 retval
= aarch64_check_state_one(curr
,
813 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
814 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
819 if (curr
->state
!= TARGET_RUNNING
) {
820 curr
->state
= TARGET_RUNNING
;
821 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
822 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
829 if (timeval_ms() > then
+ 1000) {
830 LOG_ERROR("%s: timeout waiting for target resume", __func__
);
831 retval
= ERROR_TARGET_TIMEOUT
;
835 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
836 * and it looks like the CTI's are not connected by a common
837 * trigger matrix. It seems that we need to halt one core in each
838 * cluster explicitly. So if we find that a core has not halted
839 * yet, we trigger an explicit resume for the second cluster.
841 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
842 if (retval
!= ERROR_OK
)
849 static int aarch64_resume(struct target
*target
, int current
,
850 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
853 uint64_t addr
= address
;
855 struct armv8_common
*armv8
= target_to_armv8(target
);
856 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_RESUME
;
858 if (target
->state
!= TARGET_HALTED
)
859 return ERROR_TARGET_NOT_HALTED
;
862 * If this target is part of a SMP group, prepare the others
863 * targets for resuming. This involves restoring the complete
864 * target register context and setting up CTI gates to accept
865 * resume events from the trigger matrix.
868 retval
= aarch64_prep_restart_smp(target
, handle_breakpoints
, NULL
);
869 if (retval
!= ERROR_OK
)
873 /* all targets prepared, restore and restart the current target */
874 retval
= aarch64_restore_one(target
, current
, &addr
, handle_breakpoints
,
876 if (retval
== ERROR_OK
)
877 retval
= aarch64_restart_one(target
, RESTART_SYNC
);
878 if (retval
!= ERROR_OK
)
882 int64_t then
= timeval_ms();
884 struct target
*curr
= target
;
885 struct target_list
*head
;
886 bool all_resumed
= true;
888 foreach_smp_target(head
, target
->head
) {
895 if (!target_was_examined(curr
))
898 retval
= aarch64_check_state_one(curr
,
899 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
900 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
905 if (curr
->state
!= TARGET_RUNNING
) {
906 curr
->state
= TARGET_RUNNING
;
907 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
908 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
915 if (timeval_ms() > then
+ 1000) {
916 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__
, target_name(curr
));
917 retval
= ERROR_TARGET_TIMEOUT
;
922 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
923 * and it looks like the CTI's are not connected by a common
924 * trigger matrix. It seems that we need to halt one core in each
925 * cluster explicitly. So if we find that a core has not halted
926 * yet, we trigger an explicit resume for the second cluster.
928 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
929 if (retval
!= ERROR_OK
)
934 if (retval
!= ERROR_OK
)
937 target
->debug_reason
= DBG_REASON_NOTHALTED
;
939 if (!debug_execution
) {
940 target
->state
= TARGET_RUNNING
;
941 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
942 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
944 target
->state
= TARGET_DEBUG_RUNNING
;
945 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
946 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
952 static int aarch64_debug_entry(struct target
*target
)
954 int retval
= ERROR_OK
;
955 struct armv8_common
*armv8
= target_to_armv8(target
);
956 struct arm_dpm
*dpm
= &armv8
->dpm
;
957 enum arm_state core_state
;
960 /* make sure to clear all sticky errors */
961 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
962 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
963 if (retval
== ERROR_OK
)
964 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
965 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
966 if (retval
== ERROR_OK
)
967 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
969 if (retval
!= ERROR_OK
)
972 LOG_DEBUG("%s dscr = 0x%08" PRIx32
, target_name(target
), dscr
);
975 core_state
= armv8_dpm_get_core_state(dpm
);
976 armv8_select_opcodes(armv8
, core_state
== ARM_STATE_AARCH64
);
977 armv8_select_reg_access(armv8
, core_state
== ARM_STATE_AARCH64
);
979 /* close the CTI gate for all events */
980 if (retval
== ERROR_OK
)
981 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
982 /* discard async exceptions */
983 if (retval
== ERROR_OK
)
984 retval
= dpm
->instr_cpsr_sync(dpm
);
985 if (retval
!= ERROR_OK
)
988 /* Examine debug reason */
989 armv8_dpm_report_dscr(dpm
, dscr
);
991 /* save the memory address that triggered the watchpoint */
992 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
995 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
996 armv8
->debug_base
+ CPUV8_DBG_EDWAR0
, &tmp
);
997 if (retval
!= ERROR_OK
)
999 target_addr_t edwar
= tmp
;
1001 /* EDWAR[63:32] has unknown content in aarch32 state */
1002 if (core_state
== ARM_STATE_AARCH64
) {
1003 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1004 armv8
->debug_base
+ CPUV8_DBG_EDWAR1
, &tmp
);
1005 if (retval
!= ERROR_OK
)
1007 edwar
|= ((target_addr_t
)tmp
) << 32;
1010 armv8
->dpm
.wp_addr
= edwar
;
1013 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
1015 if (retval
== ERROR_OK
&& armv8
->post_debug_entry
)
1016 retval
= armv8
->post_debug_entry(target
);
1021 static int aarch64_post_debug_entry(struct target
*target
)
1023 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1024 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1026 enum arm_mode target_mode
= ARM_MODE_ANY
;
1029 switch (armv8
->arm
.core_mode
) {
1031 target_mode
= ARMV8_64_EL1H
;
1035 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL1
, 0);
1039 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL2
, 0);
1043 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL3
, 0);
1052 instr
= ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1056 LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
1057 armv8_mode_name(armv8
->arm
.core_mode
), armv8
->arm
.core_mode
);
1061 if (target_mode
!= ARM_MODE_ANY
)
1062 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
1064 retval
= armv8
->dpm
.instr_read_data_r0(&armv8
->dpm
, instr
, &aarch64
->system_control_reg
);
1065 if (retval
!= ERROR_OK
)
1068 if (target_mode
!= ARM_MODE_ANY
)
1069 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
1071 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
1072 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
1074 if (armv8
->armv8_mmu
.armv8_cache
.info
== -1) {
1075 armv8_identify_cache(armv8
);
1076 armv8_read_mpidr(armv8
);
1079 armv8
->armv8_mmu
.mmu_enabled
=
1080 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
1081 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
1082 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
1083 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
1084 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
1089 * single-step a target
1091 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
1092 int handle_breakpoints
)
1094 struct armv8_common
*armv8
= target_to_armv8(target
);
1095 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1096 int saved_retval
= ERROR_OK
;
1100 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_STEP
;
1102 if (target
->state
!= TARGET_HALTED
) {
1103 LOG_WARNING("target not halted");
1104 return ERROR_TARGET_NOT_HALTED
;
1107 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1108 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
1109 /* make sure EDECR.SS is not set when restoring the register */
1111 if (retval
== ERROR_OK
) {
1113 /* set EDECR.SS to enter hardware step mode */
1114 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1115 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (edecr
|0x4));
1117 /* disable interrupts while stepping */
1118 if (retval
== ERROR_OK
&& aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
)
1119 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0x3 << 22);
1120 /* bail out if stepping setup has failed */
1121 if (retval
!= ERROR_OK
)
1124 if (target
->smp
&& (current
== 1)) {
1126 * isolate current target so that it doesn't get resumed
1127 * together with the others
1129 retval
= arm_cti_gate_channel(armv8
->cti
, 1);
1130 /* resume all other targets in the group */
1131 if (retval
== ERROR_OK
)
1132 retval
= aarch64_step_restart_smp(target
);
1133 if (retval
!= ERROR_OK
) {
1134 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1137 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1140 /* all other targets running, restore and restart the current target */
1141 retval
= aarch64_restore_one(target
, current
, &address
, 0, 0);
1142 if (retval
== ERROR_OK
)
1143 retval
= aarch64_restart_one(target
, RESTART_LAZY
);
1145 if (retval
!= ERROR_OK
)
1148 LOG_DEBUG("target step-resumed at 0x%" PRIx64
, address
);
1149 if (!handle_breakpoints
)
1150 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1152 int64_t then
= timeval_ms();
1157 retval
= aarch64_check_state_one(target
,
1158 PRSR_SDR
|PRSR_HALT
, PRSR_SDR
|PRSR_HALT
, &stepped
, &prsr
);
1159 if (retval
!= ERROR_OK
|| stepped
)
1162 if (timeval_ms() > then
+ 100) {
1163 LOG_ERROR("timeout waiting for target %s halt after step",
1164 target_name(target
));
1165 retval
= ERROR_TARGET_TIMEOUT
;
1171 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1172 * causes a timeout. The core takes the step but doesn't complete it and so
1173 * debug state is never entered. However, you can manually halt the core
1174 * as an external debug even is also a WFI wakeup event.
1176 if (retval
== ERROR_TARGET_TIMEOUT
)
1177 saved_retval
= aarch64_halt_one(target
, HALT_SYNC
);
1180 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1181 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
1182 if (retval
!= ERROR_OK
)
1185 /* restore interrupts */
1186 if (aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
) {
1187 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0);
1188 if (retval
!= ERROR_OK
)
1192 if (saved_retval
!= ERROR_OK
)
1193 return saved_retval
;
1198 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
1200 struct armv8_common
*armv8
= target_to_armv8(target
);
1201 struct arm
*arm
= &armv8
->arm
;
1205 LOG_DEBUG("%s", target_name(target
));
1207 if (armv8
->pre_restore_context
)
1208 armv8
->pre_restore_context(target
);
1210 retval
= armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
1211 if (retval
== ERROR_OK
) {
1212 /* registers are now invalid */
1213 register_cache_invalidate(arm
->core_cache
);
1214 register_cache_invalidate(arm
->core_cache
->next
);
1221 * Cortex-A8 Breakpoint and watchpoint functions
1224 /* Setup hardware Breakpoint Register Pair */
1225 static int aarch64_set_breakpoint(struct target
*target
,
1226 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1231 uint8_t byte_addr_select
= 0x0F;
1232 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1233 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1234 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1236 if (breakpoint
->set
) {
1237 LOG_WARNING("breakpoint already set");
1241 if (breakpoint
->type
== BKPT_HARD
) {
1243 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
1245 if (brp_i
>= aarch64
->brp_num
) {
1246 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1247 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1249 breakpoint
->set
= brp_i
+ 1;
1250 if (breakpoint
->length
== 2)
1251 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1252 control
= ((matchmode
& 0x7) << 20)
1254 | (byte_addr_select
<< 5)
1256 brp_list
[brp_i
].used
= 1;
1257 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1258 brp_list
[brp_i
].control
= control
;
1259 bpt_value
= brp_list
[brp_i
].value
;
1261 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1262 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1263 (uint32_t)(bpt_value
& 0xFFFFFFFF));
1264 if (retval
!= ERROR_OK
)
1266 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1267 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1268 (uint32_t)(bpt_value
>> 32));
1269 if (retval
!= ERROR_OK
)
1272 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1273 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1274 brp_list
[brp_i
].control
);
1275 if (retval
!= ERROR_OK
)
1277 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1278 brp_list
[brp_i
].control
,
1279 brp_list
[brp_i
].value
);
1281 } else if (breakpoint
->type
== BKPT_SOFT
) {
1285 if (armv8_dpm_get_core_state(&armv8
->dpm
) == ARM_STATE_AARCH64
) {
1286 opcode
= ARMV8_HLT(11);
1288 if (breakpoint
->length
!= 4)
1289 LOG_ERROR("bug: breakpoint length should be 4 in AArch64 mode");
1292 * core_state is ARM_STATE_ARM
1293 * in that case the opcode depends on breakpoint length:
1294 * - if length == 4 => A32 opcode
1295 * - if length == 2 => T32 opcode
1296 * - if length == 3 => T32 opcode (refer to gdb doc : ARM-Breakpoint-Kinds)
1297 * in that case the length should be changed from 3 to 4 bytes
1299 opcode
= (breakpoint
->length
== 4) ? ARMV8_HLT_A1(11) :
1300 (uint32_t) (ARMV8_HLT_T1(11) | ARMV8_HLT_T1(11) << 16);
1302 if (breakpoint
->length
== 3)
1303 breakpoint
->length
= 4;
1306 buf_set_u32(code
, 0, 32, opcode
);
1308 retval
= target_read_memory(target
,
1309 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1310 breakpoint
->length
, 1,
1311 breakpoint
->orig_instr
);
1312 if (retval
!= ERROR_OK
)
1315 armv8_cache_d_inner_flush_virt(armv8
,
1316 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1317 breakpoint
->length
);
1319 retval
= target_write_memory(target
,
1320 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1321 breakpoint
->length
, 1, code
);
1322 if (retval
!= ERROR_OK
)
1325 armv8_cache_d_inner_flush_virt(armv8
,
1326 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1327 breakpoint
->length
);
1329 armv8_cache_i_inner_inval_virt(armv8
,
1330 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1331 breakpoint
->length
);
1333 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1336 /* Ensure that halting debug mode is enable */
1337 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
1338 if (retval
!= ERROR_OK
) {
1339 LOG_DEBUG("Failed to set DSCR.HDE");
1346 static int aarch64_set_context_breakpoint(struct target
*target
,
1347 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1349 int retval
= ERROR_FAIL
;
1352 uint8_t byte_addr_select
= 0x0F;
1353 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1354 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1355 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1357 if (breakpoint
->set
) {
1358 LOG_WARNING("breakpoint already set");
1361 /*check available context BRPs*/
1362 while ((brp_list
[brp_i
].used
||
1363 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
1366 if (brp_i
>= aarch64
->brp_num
) {
1367 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1371 breakpoint
->set
= brp_i
+ 1;
1372 control
= ((matchmode
& 0x7) << 20)
1374 | (byte_addr_select
<< 5)
1376 brp_list
[brp_i
].used
= 1;
1377 brp_list
[brp_i
].value
= (breakpoint
->asid
);
1378 brp_list
[brp_i
].control
= control
;
1379 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1380 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1381 brp_list
[brp_i
].value
);
1382 if (retval
!= ERROR_OK
)
1384 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1385 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1386 brp_list
[brp_i
].control
);
1387 if (retval
!= ERROR_OK
)
1389 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1390 brp_list
[brp_i
].control
,
1391 brp_list
[brp_i
].value
);
1396 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1398 int retval
= ERROR_FAIL
;
1399 int brp_1
= 0; /* holds the contextID pair */
1400 int brp_2
= 0; /* holds the IVA pair */
1401 uint32_t control_CTX
, control_IVA
;
1402 uint8_t CTX_byte_addr_select
= 0x0F;
1403 uint8_t IVA_byte_addr_select
= 0x0F;
1404 uint8_t CTX_machmode
= 0x03;
1405 uint8_t IVA_machmode
= 0x01;
1406 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1407 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1408 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1410 if (breakpoint
->set
) {
1411 LOG_WARNING("breakpoint already set");
1414 /*check available context BRPs*/
1415 while ((brp_list
[brp_1
].used
||
1416 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
1419 printf("brp(CTX) found num: %d\n", brp_1
);
1420 if (brp_1
>= aarch64
->brp_num
) {
1421 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1425 while ((brp_list
[brp_2
].used
||
1426 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
1429 printf("brp(IVA) found num: %d\n", brp_2
);
1430 if (brp_2
>= aarch64
->brp_num
) {
1431 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1435 breakpoint
->set
= brp_1
+ 1;
1436 breakpoint
->linked_brp
= brp_2
;
1437 control_CTX
= ((CTX_machmode
& 0x7) << 20)
1440 | (CTX_byte_addr_select
<< 5)
1442 brp_list
[brp_1
].used
= 1;
1443 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1444 brp_list
[brp_1
].control
= control_CTX
;
1445 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1446 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1447 brp_list
[brp_1
].value
);
1448 if (retval
!= ERROR_OK
)
1450 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1451 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1452 brp_list
[brp_1
].control
);
1453 if (retval
!= ERROR_OK
)
1456 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1459 | (IVA_byte_addr_select
<< 5)
1461 brp_list
[brp_2
].used
= 1;
1462 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1463 brp_list
[brp_2
].control
= control_IVA
;
1464 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1465 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1466 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1467 if (retval
!= ERROR_OK
)
1469 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1470 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].BRPn
,
1471 brp_list
[brp_2
].value
>> 32);
1472 if (retval
!= ERROR_OK
)
1474 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1475 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1476 brp_list
[brp_2
].control
);
1477 if (retval
!= ERROR_OK
)
1483 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1486 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1487 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1488 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1490 if (!breakpoint
->set
) {
1491 LOG_WARNING("breakpoint not set");
1495 if (breakpoint
->type
== BKPT_HARD
) {
1496 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1497 int brp_i
= breakpoint
->set
- 1;
1498 int brp_j
= breakpoint
->linked_brp
;
1499 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1500 LOG_DEBUG("Invalid BRP number in breakpoint");
1503 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1504 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1505 brp_list
[brp_i
].used
= 0;
1506 brp_list
[brp_i
].value
= 0;
1507 brp_list
[brp_i
].control
= 0;
1508 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1509 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1510 brp_list
[brp_i
].control
);
1511 if (retval
!= ERROR_OK
)
1513 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1514 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1515 (uint32_t)brp_list
[brp_i
].value
);
1516 if (retval
!= ERROR_OK
)
1518 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1519 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1520 (uint32_t)brp_list
[brp_i
].value
);
1521 if (retval
!= ERROR_OK
)
1523 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1524 LOG_DEBUG("Invalid BRP number in breakpoint");
1527 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1528 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1529 brp_list
[brp_j
].used
= 0;
1530 brp_list
[brp_j
].value
= 0;
1531 brp_list
[brp_j
].control
= 0;
1532 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1533 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1534 brp_list
[brp_j
].control
);
1535 if (retval
!= ERROR_OK
)
1537 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1538 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1539 (uint32_t)brp_list
[brp_j
].value
);
1540 if (retval
!= ERROR_OK
)
1542 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1543 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_j
].BRPn
,
1544 (uint32_t)brp_list
[brp_j
].value
);
1545 if (retval
!= ERROR_OK
)
1548 breakpoint
->linked_brp
= 0;
1549 breakpoint
->set
= 0;
1553 int brp_i
= breakpoint
->set
- 1;
1554 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1555 LOG_DEBUG("Invalid BRP number in breakpoint");
1558 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1559 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1560 brp_list
[brp_i
].used
= 0;
1561 brp_list
[brp_i
].value
= 0;
1562 brp_list
[brp_i
].control
= 0;
1563 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1564 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1565 brp_list
[brp_i
].control
);
1566 if (retval
!= ERROR_OK
)
1568 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1569 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1570 brp_list
[brp_i
].value
);
1571 if (retval
!= ERROR_OK
)
1574 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1575 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1576 (uint32_t)brp_list
[brp_i
].value
);
1577 if (retval
!= ERROR_OK
)
1579 breakpoint
->set
= 0;
1583 /* restore original instruction (kept in target endianness) */
1585 armv8_cache_d_inner_flush_virt(armv8
,
1586 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1587 breakpoint
->length
);
1589 if (breakpoint
->length
== 4) {
1590 retval
= target_write_memory(target
,
1591 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1592 4, 1, breakpoint
->orig_instr
);
1593 if (retval
!= ERROR_OK
)
1596 retval
= target_write_memory(target
,
1597 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1598 2, 1, breakpoint
->orig_instr
);
1599 if (retval
!= ERROR_OK
)
1603 armv8_cache_d_inner_flush_virt(armv8
,
1604 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1605 breakpoint
->length
);
1607 armv8_cache_i_inner_inval_virt(armv8
,
1608 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1609 breakpoint
->length
);
1611 breakpoint
->set
= 0;
1616 static int aarch64_add_breakpoint(struct target
*target
,
1617 struct breakpoint
*breakpoint
)
1619 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1621 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1622 LOG_INFO("no hardware breakpoint available");
1623 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1626 if (breakpoint
->type
== BKPT_HARD
)
1627 aarch64
->brp_num_available
--;
1629 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1632 static int aarch64_add_context_breakpoint(struct target
*target
,
1633 struct breakpoint
*breakpoint
)
1635 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1637 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1638 LOG_INFO("no hardware breakpoint available");
1639 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1642 if (breakpoint
->type
== BKPT_HARD
)
1643 aarch64
->brp_num_available
--;
1645 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1648 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1649 struct breakpoint
*breakpoint
)
1651 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1653 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1654 LOG_INFO("no hardware breakpoint available");
1655 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1658 if (breakpoint
->type
== BKPT_HARD
)
1659 aarch64
->brp_num_available
--;
1661 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1664 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1666 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1669 /* It is perfectly possible to remove breakpoints while the target is running */
1670 if (target
->state
!= TARGET_HALTED
) {
1671 LOG_WARNING("target not halted");
1672 return ERROR_TARGET_NOT_HALTED
;
1676 if (breakpoint
->set
) {
1677 aarch64_unset_breakpoint(target
, breakpoint
);
1678 if (breakpoint
->type
== BKPT_HARD
)
1679 aarch64
->brp_num_available
++;
1685 /* Setup hardware Watchpoint Register Pair */
1686 static int aarch64_set_watchpoint(struct target
*target
,
1687 struct watchpoint
*watchpoint
)
1691 uint32_t control
, offset
, length
;
1692 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1693 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1694 struct aarch64_brp
*wp_list
= aarch64
->wp_list
;
1696 if (watchpoint
->set
) {
1697 LOG_WARNING("watchpoint already set");
1701 while (wp_list
[wp_i
].used
&& (wp_i
< aarch64
->wp_num
))
1703 if (wp_i
>= aarch64
->wp_num
) {
1704 LOG_ERROR("ERROR Can not find free Watchpoint Register Pair");
1705 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1708 control
= (1 << 0) /* enable */
1709 | (3 << 1) /* both user and privileged access */
1710 | (1 << 13); /* higher mode control */
1712 switch (watchpoint
->rw
) {
1724 /* Match up to 8 bytes. */
1725 offset
= watchpoint
->address
& 7;
1726 length
= watchpoint
->length
;
1727 if (offset
+ length
> sizeof(uint64_t)) {
1728 length
= sizeof(uint64_t) - offset
;
1729 LOG_WARNING("Adjust watchpoint match inside 8-byte boundary");
1731 for (; length
> 0; offset
++, length
--)
1732 control
|= (1 << offset
) << 5;
1734 wp_list
[wp_i
].value
= watchpoint
->address
& 0xFFFFFFFFFFFFFFF8ULL
;
1735 wp_list
[wp_i
].control
= control
;
1737 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1738 + CPUV8_DBG_WVR_BASE
+ 16 * wp_list
[wp_i
].BRPn
,
1739 (uint32_t)(wp_list
[wp_i
].value
& 0xFFFFFFFF));
1740 if (retval
!= ERROR_OK
)
1742 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1743 + CPUV8_DBG_WVR_BASE
+ 4 + 16 * wp_list
[wp_i
].BRPn
,
1744 (uint32_t)(wp_list
[wp_i
].value
>> 32));
1745 if (retval
!= ERROR_OK
)
1748 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1749 + CPUV8_DBG_WCR_BASE
+ 16 * wp_list
[wp_i
].BRPn
,
1751 if (retval
!= ERROR_OK
)
1753 LOG_DEBUG("wp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, wp_i
,
1754 wp_list
[wp_i
].control
, wp_list
[wp_i
].value
);
1756 /* Ensure that halting debug mode is enable */
1757 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
1758 if (retval
!= ERROR_OK
) {
1759 LOG_DEBUG("Failed to set DSCR.HDE");
1763 wp_list
[wp_i
].used
= 1;
1764 watchpoint
->set
= wp_i
+ 1;
1769 /* Clear hardware Watchpoint Register Pair */
1770 static int aarch64_unset_watchpoint(struct target
*target
,
1771 struct watchpoint
*watchpoint
)
1774 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1775 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1776 struct aarch64_brp
*wp_list
= aarch64
->wp_list
;
1778 if (!watchpoint
->set
) {
1779 LOG_WARNING("watchpoint not set");
1783 wp_i
= watchpoint
->set
- 1;
1784 if ((wp_i
< 0) || (wp_i
>= aarch64
->wp_num
)) {
1785 LOG_DEBUG("Invalid WP number in watchpoint");
1788 LOG_DEBUG("rwp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, wp_i
,
1789 wp_list
[wp_i
].control
, wp_list
[wp_i
].value
);
1790 wp_list
[wp_i
].used
= 0;
1791 wp_list
[wp_i
].value
= 0;
1792 wp_list
[wp_i
].control
= 0;
1793 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1794 + CPUV8_DBG_WCR_BASE
+ 16 * wp_list
[wp_i
].BRPn
,
1795 wp_list
[wp_i
].control
);
1796 if (retval
!= ERROR_OK
)
1798 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1799 + CPUV8_DBG_WVR_BASE
+ 16 * wp_list
[wp_i
].BRPn
,
1800 wp_list
[wp_i
].value
);
1801 if (retval
!= ERROR_OK
)
1804 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1805 + CPUV8_DBG_WVR_BASE
+ 4 + 16 * wp_list
[wp_i
].BRPn
,
1806 (uint32_t)wp_list
[wp_i
].value
);
1807 if (retval
!= ERROR_OK
)
1809 watchpoint
->set
= 0;
1814 static int aarch64_add_watchpoint(struct target
*target
,
1815 struct watchpoint
*watchpoint
)
1818 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1820 if (aarch64
->wp_num_available
< 1) {
1821 LOG_INFO("no hardware watchpoint available");
1822 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1825 retval
= aarch64_set_watchpoint(target
, watchpoint
);
1826 if (retval
== ERROR_OK
)
1827 aarch64
->wp_num_available
--;
1832 static int aarch64_remove_watchpoint(struct target
*target
,
1833 struct watchpoint
*watchpoint
)
1835 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1837 if (watchpoint
->set
) {
1838 aarch64_unset_watchpoint(target
, watchpoint
);
1839 aarch64
->wp_num_available
++;
1846 * find out which watchpoint hits
1847 * get exception address and compare the address to watchpoints
1849 int aarch64_hit_watchpoint(struct target
*target
,
1850 struct watchpoint
**hit_watchpoint
)
1852 if (target
->debug_reason
!= DBG_REASON_WATCHPOINT
)
1855 struct armv8_common
*armv8
= target_to_armv8(target
);
1857 target_addr_t exception_address
;
1858 struct watchpoint
*wp
;
1860 exception_address
= armv8
->dpm
.wp_addr
;
1862 if (exception_address
== 0xFFFFFFFF)
1865 for (wp
= target
->watchpoints
; wp
; wp
= wp
->next
)
1866 if (exception_address
>= wp
->address
&& exception_address
< (wp
->address
+ wp
->length
)) {
1867 *hit_watchpoint
= wp
;
1875 * Cortex-A8 Reset functions
1878 static int aarch64_enable_reset_catch(struct target
*target
, bool enable
)
1880 struct armv8_common
*armv8
= target_to_armv8(target
);
1884 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1885 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
1886 LOG_DEBUG("EDECR = 0x%08" PRIx32
", enable=%d", edecr
, enable
);
1887 if (retval
!= ERROR_OK
)
1895 return mem_ap_write_atomic_u32(armv8
->debug_ap
,
1896 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
1899 static int aarch64_clear_reset_catch(struct target
*target
)
1901 struct armv8_common
*armv8
= target_to_armv8(target
);
1906 /* check if Reset Catch debug event triggered as expected */
1907 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1908 armv8
->debug_base
+ CPUV8_DBG_EDESR
, &edesr
);
1909 if (retval
!= ERROR_OK
)
1912 was_triggered
= !!(edesr
& ESR_RC
);
1913 LOG_DEBUG("Reset Catch debug event %s",
1914 was_triggered
? "triggered" : "NOT triggered!");
1916 if (was_triggered
) {
1917 /* clear pending Reset Catch debug event */
1919 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1920 armv8
->debug_base
+ CPUV8_DBG_EDESR
, edesr
);
1921 if (retval
!= ERROR_OK
)
1928 static int aarch64_assert_reset(struct target
*target
)
1930 struct armv8_common
*armv8
= target_to_armv8(target
);
1931 enum reset_types reset_config
= jtag_get_reset_config();
1936 /* Issue some kind of warm reset. */
1937 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1938 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1939 else if (reset_config
& RESET_HAS_SRST
) {
1940 bool srst_asserted
= false;
1942 if (target
->reset_halt
) {
1943 if (target_was_examined(target
)) {
1945 if (reset_config
& RESET_SRST_NO_GATING
) {
1947 * SRST needs to be asserted *before* Reset Catch
1948 * debug event can be set up.
1950 adapter_assert_reset();
1951 srst_asserted
= true;
1953 /* make sure to clear all sticky errors */
1954 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1955 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
1958 /* set up Reset Catch debug event to halt the CPU after reset */
1959 retval
= aarch64_enable_reset_catch(target
, true);
1960 if (retval
!= ERROR_OK
)
1961 LOG_WARNING("%s: Error enabling Reset Catch debug event; the CPU will not halt immediately after reset!",
1962 target_name(target
));
1964 LOG_WARNING("%s: Target not examined, will not halt immediately after reset!",
1965 target_name(target
));
1969 /* REVISIT handle "pulls" cases, if there's
1970 * hardware that needs them to work.
1973 adapter_assert_reset();
1975 LOG_ERROR("%s: how to reset?", target_name(target
));
1979 /* registers are now invalid */
1980 if (target_was_examined(target
)) {
1981 register_cache_invalidate(armv8
->arm
.core_cache
);
1982 register_cache_invalidate(armv8
->arm
.core_cache
->next
);
1985 target
->state
= TARGET_RESET
;
1990 static int aarch64_deassert_reset(struct target
*target
)
1996 /* be certain SRST is off */
1997 adapter_deassert_reset();
1999 if (!target_was_examined(target
))
2002 retval
= aarch64_init_debug_access(target
);
2003 if (retval
!= ERROR_OK
)
2006 retval
= aarch64_poll(target
);
2007 if (retval
!= ERROR_OK
)
2010 if (target
->reset_halt
) {
2011 /* clear pending Reset Catch debug event */
2012 retval
= aarch64_clear_reset_catch(target
);
2013 if (retval
!= ERROR_OK
)
2014 LOG_WARNING("%s: Clearing Reset Catch debug event failed",
2015 target_name(target
));
2017 /* disable Reset Catch debug event */
2018 retval
= aarch64_enable_reset_catch(target
, false);
2019 if (retval
!= ERROR_OK
)
2020 LOG_WARNING("%s: Disabling Reset Catch debug event failed",
2021 target_name(target
));
2023 if (target
->state
!= TARGET_HALTED
) {
2024 LOG_WARNING("%s: ran after reset and before halt ...",
2025 target_name(target
));
2026 retval
= target_halt(target
);
2027 if (retval
!= ERROR_OK
)
2035 static int aarch64_write_cpu_memory_slow(struct target
*target
,
2036 uint32_t size
, uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
2038 struct armv8_common
*armv8
= target_to_armv8(target
);
2039 struct arm_dpm
*dpm
= &armv8
->dpm
;
2040 struct arm
*arm
= &armv8
->arm
;
2043 armv8_reg_current(arm
, 1)->dirty
= true;
2045 /* change DCC to normal mode if necessary */
2046 if (*dscr
& DSCR_MA
) {
2048 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2049 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2050 if (retval
!= ERROR_OK
)
2055 uint32_t data
, opcode
;
2057 /* write the data to store into DTRRX */
2061 data
= target_buffer_get_u16(target
, buffer
);
2063 data
= target_buffer_get_u32(target
, buffer
);
2064 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2065 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
2066 if (retval
!= ERROR_OK
)
2069 if (arm
->core_state
== ARM_STATE_AARCH64
)
2070 retval
= dpm
->instr_execute(dpm
, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0
, 1));
2072 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
2073 if (retval
!= ERROR_OK
)
2077 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRB_IP
);
2079 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRH_IP
);
2081 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRW_IP
);
2082 retval
= dpm
->instr_execute(dpm
, opcode
);
2083 if (retval
!= ERROR_OK
)
2094 static int aarch64_write_cpu_memory_fast(struct target
*target
,
2095 uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
2097 struct armv8_common
*armv8
= target_to_armv8(target
);
2098 struct arm
*arm
= &armv8
->arm
;
2101 armv8_reg_current(arm
, 1)->dirty
= true;
2103 /* Step 1.d - Change DCC to memory mode */
2105 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2106 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2107 if (retval
!= ERROR_OK
)
2111 /* Step 2.a - Do the write */
2112 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
2113 buffer
, 4, count
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
2114 if (retval
!= ERROR_OK
)
2117 /* Step 3.a - Switch DTR mode back to Normal mode */
2119 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2120 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2121 if (retval
!= ERROR_OK
)
2127 static int aarch64_write_cpu_memory(struct target
*target
,
2128 uint64_t address
, uint32_t size
,
2129 uint32_t count
, const uint8_t *buffer
)
2131 /* write memory through APB-AP */
2132 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2133 struct armv8_common
*armv8
= target_to_armv8(target
);
2134 struct arm_dpm
*dpm
= &armv8
->dpm
;
2135 struct arm
*arm
= &armv8
->arm
;
2138 if (target
->state
!= TARGET_HALTED
) {
2139 LOG_WARNING("target not halted");
2140 return ERROR_TARGET_NOT_HALTED
;
2143 /* Mark register X0 as dirty, as it will be used
2144 * for transferring the data.
2145 * It will be restored automatically when exiting
2148 armv8_reg_current(arm
, 0)->dirty
= true;
2150 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2153 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2154 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2155 if (retval
!= ERROR_OK
)
2158 /* Set Normal access mode */
2159 dscr
= (dscr
& ~DSCR_MA
);
2160 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2161 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2162 if (retval
!= ERROR_OK
)
2165 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2166 /* Write X0 with value 'address' using write procedure */
2167 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2168 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2169 retval
= dpm
->instr_write_data_dcc_64(dpm
,
2170 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
2172 /* Write R0 with value 'address' using write procedure */
2173 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
2174 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2175 retval
= dpm
->instr_write_data_dcc(dpm
,
2176 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
2179 if (retval
!= ERROR_OK
)
2182 if (size
== 4 && (address
% 4) == 0)
2183 retval
= aarch64_write_cpu_memory_fast(target
, count
, buffer
, &dscr
);
2185 retval
= aarch64_write_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
2187 if (retval
!= ERROR_OK
) {
2188 /* Unset DTR mode */
2189 mem_ap_read_atomic_u32(armv8
->debug_ap
,
2190 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2192 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2193 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2196 /* Check for sticky abort flags in the DSCR */
2197 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2198 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2199 if (retval
!= ERROR_OK
)
2203 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
2204 /* Abort occurred - clear it and exit */
2205 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
2206 armv8_dpm_handle_exception(dpm
, true);
2214 static int aarch64_read_cpu_memory_slow(struct target
*target
,
2215 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
2217 struct armv8_common
*armv8
= target_to_armv8(target
);
2218 struct arm_dpm
*dpm
= &armv8
->dpm
;
2219 struct arm
*arm
= &armv8
->arm
;
2222 armv8_reg_current(arm
, 1)->dirty
= true;
2224 /* change DCC to normal mode (if necessary) */
2225 if (*dscr
& DSCR_MA
) {
2227 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2228 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2229 if (retval
!= ERROR_OK
)
2234 uint32_t opcode
, data
;
2237 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRB_IP
);
2239 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRH_IP
);
2241 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRW_IP
);
2242 retval
= dpm
->instr_execute(dpm
, opcode
);
2243 if (retval
!= ERROR_OK
)
2246 if (arm
->core_state
== ARM_STATE_AARCH64
)
2247 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0
, 1));
2249 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
2250 if (retval
!= ERROR_OK
)
2253 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2254 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &data
);
2255 if (retval
!= ERROR_OK
)
2259 *buffer
= (uint8_t)data
;
2261 target_buffer_set_u16(target
, buffer
, (uint16_t)data
);
2263 target_buffer_set_u32(target
, buffer
, data
);
2273 static int aarch64_read_cpu_memory_fast(struct target
*target
,
2274 uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
2276 struct armv8_common
*armv8
= target_to_armv8(target
);
2277 struct arm_dpm
*dpm
= &armv8
->dpm
;
2278 struct arm
*arm
= &armv8
->arm
;
2282 /* Mark X1 as dirty */
2283 armv8_reg_current(arm
, 1)->dirty
= true;
2285 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2286 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2287 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0));
2289 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2290 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
2293 if (retval
!= ERROR_OK
)
2296 /* Step 1.e - Change DCC to memory mode */
2298 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2299 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2300 if (retval
!= ERROR_OK
)
2303 /* Step 1.f - read DBGDTRTX and discard the value */
2304 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2305 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2306 if (retval
!= ERROR_OK
)
2310 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
2311 * Abort flags are sticky, so can be read at end of transactions
2313 * This data is read in aligned to 32 bit boundary.
2317 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
2318 * increments X0 by 4. */
2319 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, buffer
, 4, count
,
2320 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
2321 if (retval
!= ERROR_OK
)
2325 /* Step 3.a - set DTR access mode back to Normal mode */
2327 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2328 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2329 if (retval
!= ERROR_OK
)
2332 /* Step 3.b - read DBGDTRTX for the final value */
2333 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2334 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2335 if (retval
!= ERROR_OK
)
2338 target_buffer_set_u32(target
, buffer
+ count
* 4, value
);
2342 static int aarch64_read_cpu_memory(struct target
*target
,
2343 target_addr_t address
, uint32_t size
,
2344 uint32_t count
, uint8_t *buffer
)
2346 /* read memory through APB-AP */
2347 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2348 struct armv8_common
*armv8
= target_to_armv8(target
);
2349 struct arm_dpm
*dpm
= &armv8
->dpm
;
2350 struct arm
*arm
= &armv8
->arm
;
2353 LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64
" size %" PRIu32
" count %" PRIu32
,
2354 address
, size
, count
);
2356 if (target
->state
!= TARGET_HALTED
) {
2357 LOG_WARNING("target not halted");
2358 return ERROR_TARGET_NOT_HALTED
;
2361 /* Mark register X0 as dirty, as it will be used
2362 * for transferring the data.
2363 * It will be restored automatically when exiting
2366 armv8_reg_current(arm
, 0)->dirty
= true;
2369 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2370 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2371 if (retval
!= ERROR_OK
)
2374 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2376 /* Set Normal access mode */
2378 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2379 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2380 if (retval
!= ERROR_OK
)
2383 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2384 /* Write X0 with value 'address' using write procedure */
2385 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2386 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2387 retval
= dpm
->instr_write_data_dcc_64(dpm
,
2388 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
2390 /* Write R0 with value 'address' using write procedure */
2391 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2392 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2393 retval
= dpm
->instr_write_data_dcc(dpm
,
2394 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
2397 if (retval
!= ERROR_OK
)
2400 if (size
== 4 && (address
% 4) == 0)
2401 retval
= aarch64_read_cpu_memory_fast(target
, count
, buffer
, &dscr
);
2403 retval
= aarch64_read_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
2405 if (dscr
& DSCR_MA
) {
2407 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2408 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2411 if (retval
!= ERROR_OK
)
2414 /* Check for sticky abort flags in the DSCR */
2415 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2416 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2417 if (retval
!= ERROR_OK
)
2422 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
2423 /* Abort occurred - clear it and exit */
2424 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
2425 armv8_dpm_handle_exception(dpm
, true);
2433 static int aarch64_read_phys_memory(struct target
*target
,
2434 target_addr_t address
, uint32_t size
,
2435 uint32_t count
, uint8_t *buffer
)
2437 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2439 if (count
&& buffer
) {
2440 /* read memory through APB-AP */
2441 retval
= aarch64_mmu_modify(target
, 0);
2442 if (retval
!= ERROR_OK
)
2444 retval
= aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2449 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
2450 uint32_t size
, uint32_t count
, uint8_t *buffer
)
2452 int mmu_enabled
= 0;
2455 /* determine if MMU was enabled on target stop */
2456 retval
= aarch64_mmu(target
, &mmu_enabled
);
2457 if (retval
!= ERROR_OK
)
2461 /* enable MMU as we could have disabled it for phys access */
2462 retval
= aarch64_mmu_modify(target
, 1);
2463 if (retval
!= ERROR_OK
)
2466 return aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2469 static int aarch64_write_phys_memory(struct target
*target
,
2470 target_addr_t address
, uint32_t size
,
2471 uint32_t count
, const uint8_t *buffer
)
2473 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2475 if (count
&& buffer
) {
2476 /* write memory through APB-AP */
2477 retval
= aarch64_mmu_modify(target
, 0);
2478 if (retval
!= ERROR_OK
)
2480 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2486 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
2487 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
2489 int mmu_enabled
= 0;
2492 /* determine if MMU was enabled on target stop */
2493 retval
= aarch64_mmu(target
, &mmu_enabled
);
2494 if (retval
!= ERROR_OK
)
2498 /* enable MMU as we could have disabled it for phys access */
2499 retval
= aarch64_mmu_modify(target
, 1);
2500 if (retval
!= ERROR_OK
)
2503 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2506 static int aarch64_handle_target_request(void *priv
)
2508 struct target
*target
= priv
;
2509 struct armv8_common
*armv8
= target_to_armv8(target
);
2512 if (!target_was_examined(target
))
2514 if (!target
->dbg_msg_enabled
)
2517 if (target
->state
== TARGET_RUNNING
) {
2520 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2521 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2523 /* check if we have data */
2524 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
2525 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2526 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
2527 if (retval
== ERROR_OK
) {
2528 target_request(target
, request
);
2529 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2530 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2538 static int aarch64_examine_first(struct target
*target
)
2540 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2541 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2542 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2543 struct aarch64_private_config
*pc
= target
->private_config
;
2545 int retval
= ERROR_OK
;
2546 uint64_t debug
, ttypr
;
2548 uint32_t tmp0
, tmp1
, tmp2
, tmp3
;
2549 debug
= ttypr
= cpuid
= 0;
2554 if (pc
->adiv5_config
.ap_num
== DP_APSEL_INVALID
) {
2555 /* Search for the APB-AB */
2556 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
2557 if (retval
!= ERROR_OK
) {
2558 LOG_ERROR("Could not find APB-AP for debug access");
2562 armv8
->debug_ap
= dap_ap(swjdp
, pc
->adiv5_config
.ap_num
);
2565 retval
= mem_ap_init(armv8
->debug_ap
);
2566 if (retval
!= ERROR_OK
) {
2567 LOG_ERROR("Could not initialize the APB-AP");
2571 armv8
->debug_ap
->memaccess_tck
= 10;
2573 if (!target
->dbgbase_set
) {
2575 /* Get ROM Table base */
2577 int32_t coreidx
= target
->coreid
;
2578 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
2579 if (retval
!= ERROR_OK
)
2581 /* Lookup 0x15 -- Processor DAP */
2582 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
2583 &armv8
->debug_base
, &coreidx
);
2584 if (retval
!= ERROR_OK
)
2586 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
2587 " apid: %08" PRIx32
, coreidx
, armv8
->debug_base
, apid
);
2589 armv8
->debug_base
= target
->dbgbase
;
2591 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2592 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
2593 if (retval
!= ERROR_OK
) {
2594 LOG_DEBUG("Examine %s failed", "oslock");
2598 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2599 armv8
->debug_base
+ CPUV8_DBG_MAINID0
, &cpuid
);
2600 if (retval
!= ERROR_OK
) {
2601 LOG_DEBUG("Examine %s failed", "CPUID");
2605 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2606 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
, &tmp0
);
2607 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2608 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
+ 4, &tmp1
);
2609 if (retval
!= ERROR_OK
) {
2610 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2613 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2614 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
, &tmp2
);
2615 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2616 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
+ 4, &tmp3
);
2617 if (retval
!= ERROR_OK
) {
2618 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2622 retval
= dap_run(armv8
->debug_ap
->dap
);
2623 if (retval
!= ERROR_OK
) {
2624 LOG_ERROR("%s: examination failed\n", target_name(target
));
2629 ttypr
= (ttypr
<< 32) | tmp0
;
2631 debug
= (debug
<< 32) | tmp2
;
2633 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
2634 LOG_DEBUG("ttypr = 0x%08" PRIx64
, ttypr
);
2635 LOG_DEBUG("debug = 0x%08" PRIx64
, debug
);
2637 if (pc
->cti
== NULL
)
2640 armv8
->cti
= pc
->cti
;
2642 retval
= aarch64_dpm_setup(aarch64
, debug
);
2643 if (retval
!= ERROR_OK
)
2646 /* Setup Breakpoint Register Pairs */
2647 aarch64
->brp_num
= (uint32_t)((debug
>> 12) & 0x0F) + 1;
2648 aarch64
->brp_num_context
= (uint32_t)((debug
>> 28) & 0x0F) + 1;
2649 aarch64
->brp_num_available
= aarch64
->brp_num
;
2650 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
2651 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
2652 aarch64
->brp_list
[i
].used
= 0;
2653 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
2654 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
2656 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
2657 aarch64
->brp_list
[i
].value
= 0;
2658 aarch64
->brp_list
[i
].control
= 0;
2659 aarch64
->brp_list
[i
].BRPn
= i
;
2662 /* Setup Watchpoint Register Pairs */
2663 aarch64
->wp_num
= (uint32_t)((debug
>> 20) & 0x0F) + 1;
2664 aarch64
->wp_num_available
= aarch64
->wp_num
;
2665 aarch64
->wp_list
= calloc(aarch64
->wp_num
, sizeof(struct aarch64_brp
));
2666 for (i
= 0; i
< aarch64
->wp_num
; i
++) {
2667 aarch64
->wp_list
[i
].used
= 0;
2668 aarch64
->wp_list
[i
].type
= BRP_NORMAL
;
2669 aarch64
->wp_list
[i
].value
= 0;
2670 aarch64
->wp_list
[i
].control
= 0;
2671 aarch64
->wp_list
[i
].BRPn
= i
;
2674 LOG_DEBUG("Configured %i hw breakpoints, %i watchpoints",
2675 aarch64
->brp_num
, aarch64
->wp_num
);
2677 target
->state
= TARGET_UNKNOWN
;
2678 target
->debug_reason
= DBG_REASON_NOTHALTED
;
2679 aarch64
->isrmasking_mode
= AARCH64_ISRMASK_ON
;
2680 target_set_examined(target
);
2684 static int aarch64_examine(struct target
*target
)
2686 int retval
= ERROR_OK
;
2688 /* don't re-probe hardware after each reset */
2689 if (!target_was_examined(target
))
2690 retval
= aarch64_examine_first(target
);
2692 /* Configure core debug access */
2693 if (retval
== ERROR_OK
)
2694 retval
= aarch64_init_debug_access(target
);
2700 * Cortex-A8 target creation and initialization
2703 static int aarch64_init_target(struct command_context
*cmd_ctx
,
2704 struct target
*target
)
2706 /* examine_first() does a bunch of this */
2707 arm_semihosting_init(target
);
2711 static int aarch64_init_arch_info(struct target
*target
,
2712 struct aarch64_common
*aarch64
, struct adiv5_dap
*dap
)
2714 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2716 /* Setup struct aarch64_common */
2717 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
2718 armv8
->arm
.dap
= dap
;
2720 /* register arch-specific functions */
2721 armv8
->examine_debug_reason
= NULL
;
2722 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
2723 armv8
->pre_restore_context
= NULL
;
2724 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
2726 armv8_init_arch_info(target
, armv8
);
2727 target_register_timer_callback(aarch64_handle_target_request
, 1,
2728 TARGET_TIMER_TYPE_PERIODIC
, target
);
2733 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
2735 struct aarch64_private_config
*pc
= target
->private_config
;
2736 struct aarch64_common
*aarch64
;
2738 if (adiv5_verify_config(&pc
->adiv5_config
) != ERROR_OK
)
2741 aarch64
= calloc(1, sizeof(struct aarch64_common
));
2742 if (aarch64
== NULL
) {
2743 LOG_ERROR("Out of memory");
2747 return aarch64_init_arch_info(target
, aarch64
, pc
->adiv5_config
.dap
);
2750 static void aarch64_deinit_target(struct target
*target
)
2752 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2753 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2754 struct arm_dpm
*dpm
= &armv8
->dpm
;
2756 armv8_free_reg_cache(target
);
2757 free(aarch64
->brp_list
);
2760 free(target
->private_config
);
2764 static int aarch64_mmu(struct target
*target
, int *enabled
)
2766 if (target
->state
!= TARGET_HALTED
) {
2767 LOG_ERROR("%s: target %s not halted", __func__
, target_name(target
));
2768 return ERROR_TARGET_INVALID
;
2771 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
2775 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
2776 target_addr_t
*phys
)
2778 return armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
2782 * private target configuration items
2784 enum aarch64_cfg_param
{
2788 static const struct jim_nvp nvp_config_opts
[] = {
2789 { .name
= "-cti", .value
= CFG_CTI
},
2790 { .name
= NULL
, .value
= -1 }
2793 static int aarch64_jim_configure(struct target
*target
, struct jim_getopt_info
*goi
)
2795 struct aarch64_private_config
*pc
;
2799 pc
= (struct aarch64_private_config
*)target
->private_config
;
2801 pc
= calloc(1, sizeof(struct aarch64_private_config
));
2802 pc
->adiv5_config
.ap_num
= DP_APSEL_INVALID
;
2803 target
->private_config
= pc
;
2807 * Call adiv5_jim_configure() to parse the common DAP options
2808 * It will return JIM_CONTINUE if it didn't find any known
2809 * options, JIM_OK if it correctly parsed the topmost option
2810 * and JIM_ERR if an error occurred during parameter evaluation.
2811 * For JIM_CONTINUE, we check our own params.
2813 * adiv5_jim_configure() assumes 'private_config' to point to
2814 * 'struct adiv5_private_config'. Override 'private_config'!
2816 target
->private_config
= &pc
->adiv5_config
;
2817 e
= adiv5_jim_configure(target
, goi
);
2818 target
->private_config
= pc
;
2819 if (e
!= JIM_CONTINUE
)
2822 /* parse config or cget options ... */
2823 if (goi
->argc
> 0) {
2824 Jim_SetEmptyResult(goi
->interp
);
2826 /* check first if topmost item is for us */
2827 e
= jim_nvp_name2value_obj(goi
->interp
, nvp_config_opts
,
2830 return JIM_CONTINUE
;
2832 e
= jim_getopt_obj(goi
, NULL
);
2838 if (goi
->isconfigure
) {
2840 struct arm_cti
*cti
;
2841 e
= jim_getopt_obj(goi
, &o_cti
);
2844 cti
= cti_instance_by_jim_obj(goi
->interp
, o_cti
);
2846 Jim_SetResultString(goi
->interp
, "CTI name invalid!", -1);
2851 if (goi
->argc
!= 0) {
2852 Jim_WrongNumArgs(goi
->interp
,
2853 goi
->argc
, goi
->argv
,
2858 if (pc
== NULL
|| pc
->cti
== NULL
) {
2859 Jim_SetResultString(goi
->interp
, "CTI not configured", -1);
2862 Jim_SetResultString(goi
->interp
, arm_cti_name(pc
->cti
), -1);
2868 return JIM_CONTINUE
;
2875 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
2877 struct target
*target
= get_current_target(CMD_CTX
);
2878 struct armv8_common
*armv8
= target_to_armv8(target
);
2880 return armv8_handle_cache_info_command(CMD
,
2881 &armv8
->armv8_mmu
.armv8_cache
);
2884 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
2886 struct target
*target
= get_current_target(CMD_CTX
);
2887 if (!target_was_examined(target
)) {
2888 LOG_ERROR("target not examined yet");
2892 return aarch64_init_debug_access(target
);
2895 COMMAND_HANDLER(aarch64_handle_disassemble_command
)
2897 struct target
*target
= get_current_target(CMD_CTX
);
2899 if (target
== NULL
) {
2900 LOG_ERROR("No target selected");
2904 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2906 if (aarch64
->common_magic
!= AARCH64_COMMON_MAGIC
) {
2907 command_print(CMD
, "current target isn't an AArch64");
2912 target_addr_t address
;
2916 COMMAND_PARSE_NUMBER(int, CMD_ARGV
[1], count
);
2919 COMMAND_PARSE_ADDRESS(CMD_ARGV
[0], address
);
2922 return ERROR_COMMAND_SYNTAX_ERROR
;
2925 return a64_disassemble(CMD
, target
, address
, count
);
2928 COMMAND_HANDLER(aarch64_mask_interrupts_command
)
2930 struct target
*target
= get_current_target(CMD_CTX
);
2931 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2933 static const struct jim_nvp nvp_maskisr_modes
[] = {
2934 { .name
= "off", .value
= AARCH64_ISRMASK_OFF
},
2935 { .name
= "on", .value
= AARCH64_ISRMASK_ON
},
2936 { .name
= NULL
, .value
= -1 },
2938 const struct jim_nvp
*n
;
2941 n
= jim_nvp_name2value_simple(nvp_maskisr_modes
, CMD_ARGV
[0]);
2942 if (n
->name
== NULL
) {
2943 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV
[0]);
2944 return ERROR_COMMAND_SYNTAX_ERROR
;
2947 aarch64
->isrmasking_mode
= n
->value
;
2950 n
= jim_nvp_value2name_simple(nvp_maskisr_modes
, aarch64
->isrmasking_mode
);
2951 command_print(CMD
, "aarch64 interrupt mask %s", n
->name
);
2956 static int jim_mcrmrc(Jim_Interp
*interp
, int argc
, Jim_Obj
* const *argv
)
2958 struct command
*c
= jim_to_command(interp
);
2959 struct command_context
*context
;
2960 struct target
*target
;
2963 bool is_mcr
= false;
2966 if (!strcmp(c
->name
, "mcr")) {
2973 context
= current_command_context(interp
);
2974 assert(context
!= NULL
);
2976 target
= get_current_target(context
);
2977 if (target
== NULL
) {
2978 LOG_ERROR("%s: no current target", __func__
);
2981 if (!target_was_examined(target
)) {
2982 LOG_ERROR("%s: not yet examined", target_name(target
));
2986 arm
= target_to_arm(target
);
2988 LOG_ERROR("%s: not an ARM", target_name(target
));
2992 if (target
->state
!= TARGET_HALTED
)
2993 return ERROR_TARGET_NOT_HALTED
;
2995 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2996 LOG_ERROR("%s: not 32-bit arm target", target_name(target
));
3000 if (argc
!= arg_cnt
) {
3001 LOG_ERROR("%s: wrong number of arguments", __func__
);
3013 /* NOTE: parameter sequence matches ARM instruction set usage:
3014 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
3015 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
3016 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
3018 retval
= Jim_GetLong(interp
, argv
[1], &l
);
3019 if (retval
!= JIM_OK
)
3022 LOG_ERROR("%s: %s %d out of range", __func__
,
3023 "coprocessor", (int) l
);
3028 retval
= Jim_GetLong(interp
, argv
[2], &l
);
3029 if (retval
!= JIM_OK
)
3032 LOG_ERROR("%s: %s %d out of range", __func__
,
3038 retval
= Jim_GetLong(interp
, argv
[3], &l
);
3039 if (retval
!= JIM_OK
)
3042 LOG_ERROR("%s: %s %d out of range", __func__
,
3048 retval
= Jim_GetLong(interp
, argv
[4], &l
);
3049 if (retval
!= JIM_OK
)
3052 LOG_ERROR("%s: %s %d out of range", __func__
,
3058 retval
= Jim_GetLong(interp
, argv
[5], &l
);
3059 if (retval
!= JIM_OK
)
3062 LOG_ERROR("%s: %s %d out of range", __func__
,
3070 if (is_mcr
== true) {
3071 retval
= Jim_GetLong(interp
, argv
[6], &l
);
3072 if (retval
!= JIM_OK
)
3076 /* NOTE: parameters reordered! */
3077 /* ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2) */
3078 retval
= arm
->mcr(target
, cpnum
, op1
, op2
, CRn
, CRm
, value
);
3079 if (retval
!= ERROR_OK
)
3082 /* NOTE: parameters reordered! */
3083 /* ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2) */
3084 retval
= arm
->mrc(target
, cpnum
, op1
, op2
, CRn
, CRm
, &value
);
3085 if (retval
!= ERROR_OK
)
3088 Jim_SetResult(interp
, Jim_NewIntObj(interp
, value
));
3094 static const struct command_registration aarch64_exec_command_handlers
[] = {
3096 .name
= "cache_info",
3097 .handler
= aarch64_handle_cache_info_command
,
3098 .mode
= COMMAND_EXEC
,
3099 .help
= "display information about target caches",
3104 .handler
= aarch64_handle_dbginit_command
,
3105 .mode
= COMMAND_EXEC
,
3106 .help
= "Initialize core debug",
3110 .name
= "disassemble",
3111 .handler
= aarch64_handle_disassemble_command
,
3112 .mode
= COMMAND_EXEC
,
3113 .help
= "Disassemble instructions",
3114 .usage
= "address [count]",
3118 .handler
= aarch64_mask_interrupts_command
,
3119 .mode
= COMMAND_ANY
,
3120 .help
= "mask aarch64 interrupts during single-step",
3121 .usage
= "['on'|'off']",
3125 .mode
= COMMAND_EXEC
,
3126 .jim_handler
= jim_mcrmrc
,
3127 .help
= "write coprocessor register",
3128 .usage
= "cpnum op1 CRn CRm op2 value",
3132 .mode
= COMMAND_EXEC
,
3133 .jim_handler
= jim_mcrmrc
,
3134 .help
= "read coprocessor register",
3135 .usage
= "cpnum op1 CRn CRm op2",
3138 .chain
= smp_command_handlers
,
3142 COMMAND_REGISTRATION_DONE
3145 extern const struct command_registration semihosting_common_handlers
[];
3147 static const struct command_registration aarch64_command_handlers
[] = {
3150 .mode
= COMMAND_ANY
,
3151 .help
= "ARM Command Group",
3153 .chain
= semihosting_common_handlers
3156 .chain
= armv8_command_handlers
,
3160 .mode
= COMMAND_ANY
,
3161 .help
= "Aarch64 command group",
3163 .chain
= aarch64_exec_command_handlers
,
3165 COMMAND_REGISTRATION_DONE
3168 struct target_type aarch64_target
= {
3171 .poll
= aarch64_poll
,
3172 .arch_state
= armv8_arch_state
,
3174 .halt
= aarch64_halt
,
3175 .resume
= aarch64_resume
,
3176 .step
= aarch64_step
,
3178 .assert_reset
= aarch64_assert_reset
,
3179 .deassert_reset
= aarch64_deassert_reset
,
3181 /* REVISIT allow exporting VFP3 registers ... */
3182 .get_gdb_arch
= armv8_get_gdb_arch
,
3183 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
3185 .read_memory
= aarch64_read_memory
,
3186 .write_memory
= aarch64_write_memory
,
3188 .add_breakpoint
= aarch64_add_breakpoint
,
3189 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
3190 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
3191 .remove_breakpoint
= aarch64_remove_breakpoint
,
3192 .add_watchpoint
= aarch64_add_watchpoint
,
3193 .remove_watchpoint
= aarch64_remove_watchpoint
,
3194 .hit_watchpoint
= aarch64_hit_watchpoint
,
3196 .commands
= aarch64_command_handlers
,
3197 .target_create
= aarch64_target_create
,
3198 .target_jim_configure
= aarch64_jim_configure
,
3199 .init_target
= aarch64_init_target
,
3200 .deinit_target
= aarch64_deinit_target
,
3201 .examine
= aarch64_examine
,
3203 .read_phys_memory
= aarch64_read_phys_memory
,
3204 .write_phys_memory
= aarch64_write_phys_memory
,
3206 .virt2phys
= aarch64_virt2phys
,
Linking to existing account procedure
If you already have an account and want to add another login method,
you MUST first sign in with your existing account, then change the URL to
https://review.openocd.org/login/?link
to reach this page again; this time it will work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)