/***************************************************************************
 *   Copyright (C) 2015 by David Ung                                       *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.                                        *
 ***************************************************************************/
24 #include "breakpoints.h"
26 #include "a64_disassembler.h"
28 #include "target_request.h"
29 #include "target_type.h"
30 #include "armv8_opcodes.h"
31 #include "armv8_cache.h"
32 #include "arm_semihosting.h"
33 #include "jtag/interface.h"
35 #include <helper/time_support.h>
47 struct aarch64_private_config
{
48 struct adiv5_private_config adiv5_config
;
52 static int aarch64_poll(struct target
*target
);
53 static int aarch64_debug_entry(struct target
*target
);
54 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
55 static int aarch64_set_breakpoint(struct target
*target
,
56 struct breakpoint
*breakpoint
, uint8_t matchmode
);
57 static int aarch64_set_context_breakpoint(struct target
*target
,
58 struct breakpoint
*breakpoint
, uint8_t matchmode
);
59 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
60 struct breakpoint
*breakpoint
);
61 static int aarch64_unset_breakpoint(struct target
*target
,
62 struct breakpoint
*breakpoint
);
63 static int aarch64_mmu(struct target
*target
, int *enabled
);
64 static int aarch64_virt2phys(struct target
*target
,
65 target_addr_t virt
, target_addr_t
*phys
);
66 static int aarch64_read_cpu_memory(struct target
*target
,
67 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
69 static int aarch64_restore_system_control_reg(struct target
*target
)
71 enum arm_mode target_mode
= ARM_MODE_ANY
;
72 int retval
= ERROR_OK
;
75 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
76 struct armv8_common
*armv8
= target_to_armv8(target
);
78 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
79 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
80 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
82 switch (armv8
->arm
.core_mode
) {
84 target_mode
= ARMV8_64_EL1H
;
88 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
92 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
96 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
105 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
109 LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
110 armv8_mode_name(armv8
->arm
.core_mode
), armv8
->arm
.core_mode
);
114 if (target_mode
!= ARM_MODE_ANY
)
115 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
117 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
, aarch64
->system_control_reg
);
118 if (retval
!= ERROR_OK
)
121 if (target_mode
!= ARM_MODE_ANY
)
122 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
128 /* modify system_control_reg in order to enable or disable mmu for :
129 * - virt2phys address conversion
130 * - read or write memory in phys or virt address */
131 static int aarch64_mmu_modify(struct target
*target
, int enable
)
133 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
134 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
135 int retval
= ERROR_OK
;
136 enum arm_mode target_mode
= ARM_MODE_ANY
;
140 /* if mmu enabled at target stop and mmu not enable */
141 if (!(aarch64
->system_control_reg
& 0x1U
)) {
142 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
145 if (!(aarch64
->system_control_reg_curr
& 0x1U
))
146 aarch64
->system_control_reg_curr
|= 0x1U
;
148 if (aarch64
->system_control_reg_curr
& 0x4U
) {
149 /* data cache is active */
150 aarch64
->system_control_reg_curr
&= ~0x4U
;
151 /* flush data cache armv8 function to be called */
152 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
153 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
155 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
156 aarch64
->system_control_reg_curr
&= ~0x1U
;
160 switch (armv8
->arm
.core_mode
) {
162 target_mode
= ARMV8_64_EL1H
;
166 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
170 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
174 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
183 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
187 LOG_DEBUG("unknown cpu state 0x%x", armv8
->arm
.core_mode
);
190 if (target_mode
!= ARM_MODE_ANY
)
191 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
193 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
,
194 aarch64
->system_control_reg_curr
);
196 if (target_mode
!= ARM_MODE_ANY
)
197 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
203 * Basic debug access, very low level assumes state is saved
205 static int aarch64_init_debug_access(struct target
*target
)
207 struct armv8_common
*armv8
= target_to_armv8(target
);
211 LOG_DEBUG("%s", target_name(target
));
213 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
214 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
215 if (retval
!= ERROR_OK
) {
216 LOG_DEBUG("Examine %s failed", "oslock");
220 /* Clear Sticky Power Down status Bit in PRSR to enable access to
221 the registers in the Core Power Domain */
222 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
223 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
224 if (retval
!= ERROR_OK
)
228 * Static CTI configuration:
229 * Channel 0 -> trigger outputs HALT request to PE
230 * Channel 1 -> trigger outputs Resume request to PE
231 * Gate all channel trigger events from entering the CTM
235 retval
= arm_cti_enable(armv8
->cti
, true);
236 /* By default, gate all channel events to and from the CTM */
237 if (retval
== ERROR_OK
)
238 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
239 /* output halt requests to PE on channel 0 event */
240 if (retval
== ERROR_OK
)
241 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN0
, CTI_CHNL(0));
242 /* output restart requests to PE on channel 1 event */
243 if (retval
== ERROR_OK
)
244 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN1
, CTI_CHNL(1));
245 if (retval
!= ERROR_OK
)
248 /* Resync breakpoint registers */
253 /* Write to memory mapped registers directly with no cache or mmu handling */
254 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
259 struct armv8_common
*armv8
= target_to_armv8(target
);
261 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
266 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint64_t debug
)
268 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
271 dpm
->arm
= &a8
->armv8_common
.arm
;
274 retval
= armv8_dpm_setup(dpm
);
275 if (retval
== ERROR_OK
)
276 retval
= armv8_dpm_initialize(dpm
);
281 static int aarch64_set_dscr_bits(struct target
*target
, unsigned long bit_mask
, unsigned long value
)
283 struct armv8_common
*armv8
= target_to_armv8(target
);
284 return armv8_set_dbgreg_bits(armv8
, CPUV8_DBG_DSCR
, bit_mask
, value
);
287 static int aarch64_check_state_one(struct target
*target
,
288 uint32_t mask
, uint32_t val
, int *p_result
, uint32_t *p_prsr
)
290 struct armv8_common
*armv8
= target_to_armv8(target
);
294 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
295 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &prsr
);
296 if (retval
!= ERROR_OK
)
303 *p_result
= (prsr
& mask
) == (val
& mask
);
308 static int aarch64_wait_halt_one(struct target
*target
)
310 int retval
= ERROR_OK
;
313 int64_t then
= timeval_ms();
317 retval
= aarch64_check_state_one(target
, PRSR_HALT
, PRSR_HALT
, &halted
, &prsr
);
318 if (retval
!= ERROR_OK
|| halted
)
321 if (timeval_ms() > then
+ 1000) {
322 retval
= ERROR_TARGET_TIMEOUT
;
323 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32
, target_name(target
), prsr
);
330 static int aarch64_prepare_halt_smp(struct target
*target
, bool exc_target
, struct target
**p_first
)
332 int retval
= ERROR_OK
;
333 struct target_list
*head
= target
->head
;
334 struct target
*first
= NULL
;
336 LOG_DEBUG("target %s exc %i", target_name(target
), exc_target
);
338 while (head
!= NULL
) {
339 struct target
*curr
= head
->target
;
340 struct armv8_common
*armv8
= target_to_armv8(curr
);
343 if (exc_target
&& curr
== target
)
345 if (!target_was_examined(curr
))
347 if (curr
->state
!= TARGET_RUNNING
)
350 /* HACK: mark this target as prepared for halting */
351 curr
->debug_reason
= DBG_REASON_DBGRQ
;
353 /* open the gate for channel 0 to let HALT requests pass to the CTM */
354 retval
= arm_cti_ungate_channel(armv8
->cti
, 0);
355 if (retval
== ERROR_OK
)
356 retval
= aarch64_set_dscr_bits(curr
, DSCR_HDE
, DSCR_HDE
);
357 if (retval
!= ERROR_OK
)
360 LOG_DEBUG("target %s prepared", target_name(curr
));
367 if (exc_target
&& first
)
376 static int aarch64_halt_one(struct target
*target
, enum halt_mode mode
)
378 int retval
= ERROR_OK
;
379 struct armv8_common
*armv8
= target_to_armv8(target
);
381 LOG_DEBUG("%s", target_name(target
));
383 /* allow Halting Debug Mode */
384 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
385 if (retval
!= ERROR_OK
)
388 /* trigger an event on channel 0, this outputs a halt request to the PE */
389 retval
= arm_cti_pulse_channel(armv8
->cti
, 0);
390 if (retval
!= ERROR_OK
)
393 if (mode
== HALT_SYNC
) {
394 retval
= aarch64_wait_halt_one(target
);
395 if (retval
!= ERROR_OK
) {
396 if (retval
== ERROR_TARGET_TIMEOUT
)
397 LOG_ERROR("Timeout waiting for target %s halt", target_name(target
));
405 static int aarch64_halt_smp(struct target
*target
, bool exc_target
)
407 struct target
*next
= target
;
410 /* prepare halt on all PEs of the group */
411 retval
= aarch64_prepare_halt_smp(target
, exc_target
, &next
);
413 if (exc_target
&& next
== target
)
416 /* halt the target PE */
417 if (retval
== ERROR_OK
)
418 retval
= aarch64_halt_one(next
, HALT_LAZY
);
420 if (retval
!= ERROR_OK
)
423 /* wait for all PEs to halt */
424 int64_t then
= timeval_ms();
426 bool all_halted
= true;
427 struct target_list
*head
;
430 foreach_smp_target(head
, target
->head
) {
435 if (!target_was_examined(curr
))
438 retval
= aarch64_check_state_one(curr
, PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
439 if (retval
!= ERROR_OK
|| !halted
) {
448 if (timeval_ms() > then
+ 1000) {
449 retval
= ERROR_TARGET_TIMEOUT
;
454 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
455 * and it looks like the CTI's are not connected by a common
456 * trigger matrix. It seems that we need to halt one core in each
457 * cluster explicitly. So if we find that a core has not halted
458 * yet, we trigger an explicit halt for the second cluster.
460 retval
= aarch64_halt_one(curr
, HALT_LAZY
);
461 if (retval
!= ERROR_OK
)
468 static int update_halt_gdb(struct target
*target
, enum target_debug_reason debug_reason
)
470 struct target
*gdb_target
= NULL
;
471 struct target_list
*head
;
474 if (debug_reason
== DBG_REASON_NOTHALTED
) {
475 LOG_DEBUG("Halting remaining targets in SMP group");
476 aarch64_halt_smp(target
, true);
479 /* poll all targets in the group, but skip the target that serves GDB */
480 foreach_smp_target(head
, target
->head
) {
482 /* skip calling context */
485 if (!target_was_examined(curr
))
487 /* skip targets that were already halted */
488 if (curr
->state
== TARGET_HALTED
)
490 /* remember the gdb_service->target */
491 if (curr
->gdb_service
!= NULL
)
492 gdb_target
= curr
->gdb_service
->target
;
494 if (curr
== gdb_target
)
497 /* avoid recursion in aarch64_poll() */
503 /* after all targets were updated, poll the gdb serving target */
504 if (gdb_target
!= NULL
&& gdb_target
!= target
)
505 aarch64_poll(gdb_target
);
511 * Aarch64 Run control
514 static int aarch64_poll(struct target
*target
)
516 enum target_state prev_target_state
;
517 int retval
= ERROR_OK
;
520 retval
= aarch64_check_state_one(target
,
521 PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
522 if (retval
!= ERROR_OK
)
526 prev_target_state
= target
->state
;
527 if (prev_target_state
!= TARGET_HALTED
) {
528 enum target_debug_reason debug_reason
= target
->debug_reason
;
530 /* We have a halting debug event */
531 target
->state
= TARGET_HALTED
;
532 LOG_DEBUG("Target %s halted", target_name(target
));
533 retval
= aarch64_debug_entry(target
);
534 if (retval
!= ERROR_OK
)
538 update_halt_gdb(target
, debug_reason
);
540 if (arm_semihosting(target
, &retval
) != 0)
543 switch (prev_target_state
) {
547 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
549 case TARGET_DEBUG_RUNNING
:
550 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_HALTED
);
557 target
->state
= TARGET_RUNNING
;
562 static int aarch64_halt(struct target
*target
)
564 struct armv8_common
*armv8
= target_to_armv8(target
);
565 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_HALT
;
568 return aarch64_halt_smp(target
, false);
570 return aarch64_halt_one(target
, HALT_SYNC
);
573 static int aarch64_restore_one(struct target
*target
, int current
,
574 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
576 struct armv8_common
*armv8
= target_to_armv8(target
);
577 struct arm
*arm
= &armv8
->arm
;
581 LOG_DEBUG("%s", target_name(target
));
583 if (!debug_execution
)
584 target_free_all_working_areas(target
);
586 /* current = 1: continue on current pc, otherwise continue at <address> */
587 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
589 resume_pc
= *address
;
591 *address
= resume_pc
;
593 /* Make sure that the Armv7 gdb thumb fixups does not
594 * kill the return address
596 switch (arm
->core_state
) {
598 resume_pc
&= 0xFFFFFFFC;
600 case ARM_STATE_AARCH64
:
601 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
603 case ARM_STATE_THUMB
:
604 case ARM_STATE_THUMB_EE
:
605 /* When the return address is loaded into PC
606 * bit 0 must be 1 to stay in Thumb state
610 case ARM_STATE_JAZELLE
:
611 LOG_ERROR("How do I resume into Jazelle state??");
614 LOG_DEBUG("resume pc = 0x%016" PRIx64
, resume_pc
);
615 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
616 arm
->pc
->dirty
= true;
617 arm
->pc
->valid
= true;
619 /* called it now before restoring context because it uses cpu
620 * register r0 for restoring system control register */
621 retval
= aarch64_restore_system_control_reg(target
);
622 if (retval
== ERROR_OK
)
623 retval
= aarch64_restore_context(target
, handle_breakpoints
);
629 * prepare single target for restart
633 static int aarch64_prepare_restart_one(struct target
*target
)
635 struct armv8_common
*armv8
= target_to_armv8(target
);
640 LOG_DEBUG("%s", target_name(target
));
642 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
643 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
644 if (retval
!= ERROR_OK
)
647 if ((dscr
& DSCR_ITE
) == 0)
648 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
649 if ((dscr
& DSCR_ERR
) != 0)
650 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
652 /* acknowledge a pending CTI halt event */
653 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
655 * open the CTI gate for channel 1 so that the restart events
656 * get passed along to all PEs. Also close gate for channel 0
657 * to isolate the PE from halt events.
659 if (retval
== ERROR_OK
)
660 retval
= arm_cti_ungate_channel(armv8
->cti
, 1);
661 if (retval
== ERROR_OK
)
662 retval
= arm_cti_gate_channel(armv8
->cti
, 0);
664 /* make sure that DSCR.HDE is set */
665 if (retval
== ERROR_OK
) {
667 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
668 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
671 if (retval
== ERROR_OK
) {
672 /* clear sticky bits in PRSR, SDR is now 0 */
673 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
674 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &tmp
);
680 static int aarch64_do_restart_one(struct target
*target
, enum restart_mode mode
)
682 struct armv8_common
*armv8
= target_to_armv8(target
);
685 LOG_DEBUG("%s", target_name(target
));
687 /* trigger an event on channel 1, generates a restart request to the PE */
688 retval
= arm_cti_pulse_channel(armv8
->cti
, 1);
689 if (retval
!= ERROR_OK
)
692 if (mode
== RESTART_SYNC
) {
693 int64_t then
= timeval_ms();
697 * if PRSR.SDR is set now, the target did restart, even
698 * if it's now already halted again (e.g. due to breakpoint)
700 retval
= aarch64_check_state_one(target
,
701 PRSR_SDR
, PRSR_SDR
, &resumed
, NULL
);
702 if (retval
!= ERROR_OK
|| resumed
)
705 if (timeval_ms() > then
+ 1000) {
706 LOG_ERROR("%s: Timeout waiting for resume"PRIx32
, target_name(target
));
707 retval
= ERROR_TARGET_TIMEOUT
;
713 if (retval
!= ERROR_OK
)
716 target
->debug_reason
= DBG_REASON_NOTHALTED
;
717 target
->state
= TARGET_RUNNING
;
722 static int aarch64_restart_one(struct target
*target
, enum restart_mode mode
)
726 LOG_DEBUG("%s", target_name(target
));
728 retval
= aarch64_prepare_restart_one(target
);
729 if (retval
== ERROR_OK
)
730 retval
= aarch64_do_restart_one(target
, mode
);
736 * prepare all but the current target for restart
738 static int aarch64_prep_restart_smp(struct target
*target
, int handle_breakpoints
, struct target
**p_first
)
740 int retval
= ERROR_OK
;
741 struct target_list
*head
;
742 struct target
*first
= NULL
;
745 foreach_smp_target(head
, target
->head
) {
746 struct target
*curr
= head
->target
;
748 /* skip calling target */
751 if (!target_was_examined(curr
))
753 if (curr
->state
!= TARGET_HALTED
)
756 /* resume at current address, not in step mode */
757 retval
= aarch64_restore_one(curr
, 1, &address
, handle_breakpoints
, 0);
758 if (retval
== ERROR_OK
)
759 retval
= aarch64_prepare_restart_one(curr
);
760 if (retval
!= ERROR_OK
) {
761 LOG_ERROR("failed to restore target %s", target_name(curr
));
764 /* remember the first valid target in the group */
776 static int aarch64_step_restart_smp(struct target
*target
)
778 int retval
= ERROR_OK
;
779 struct target_list
*head
;
780 struct target
*first
= NULL
;
782 LOG_DEBUG("%s", target_name(target
));
784 retval
= aarch64_prep_restart_smp(target
, 0, &first
);
785 if (retval
!= ERROR_OK
)
789 retval
= aarch64_do_restart_one(first
, RESTART_LAZY
);
790 if (retval
!= ERROR_OK
) {
791 LOG_DEBUG("error restarting target %s", target_name(first
));
795 int64_t then
= timeval_ms();
797 struct target
*curr
= target
;
798 bool all_resumed
= true;
800 foreach_smp_target(head
, target
->head
) {
809 if (!target_was_examined(curr
))
812 retval
= aarch64_check_state_one(curr
,
813 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
814 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
819 if (curr
->state
!= TARGET_RUNNING
) {
820 curr
->state
= TARGET_RUNNING
;
821 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
822 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
829 if (timeval_ms() > then
+ 1000) {
830 LOG_ERROR("%s: timeout waiting for target resume", __func__
);
831 retval
= ERROR_TARGET_TIMEOUT
;
835 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
836 * and it looks like the CTI's are not connected by a common
837 * trigger matrix. It seems that we need to halt one core in each
838 * cluster explicitly. So if we find that a core has not halted
839 * yet, we trigger an explicit resume for the second cluster.
841 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
842 if (retval
!= ERROR_OK
)
849 static int aarch64_resume(struct target
*target
, int current
,
850 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
853 uint64_t addr
= address
;
855 struct armv8_common
*armv8
= target_to_armv8(target
);
856 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_RESUME
;
858 if (target
->state
!= TARGET_HALTED
)
859 return ERROR_TARGET_NOT_HALTED
;
862 * If this target is part of a SMP group, prepare the others
863 * targets for resuming. This involves restoring the complete
864 * target register context and setting up CTI gates to accept
865 * resume events from the trigger matrix.
868 retval
= aarch64_prep_restart_smp(target
, handle_breakpoints
, NULL
);
869 if (retval
!= ERROR_OK
)
873 /* all targets prepared, restore and restart the current target */
874 retval
= aarch64_restore_one(target
, current
, &addr
, handle_breakpoints
,
876 if (retval
== ERROR_OK
)
877 retval
= aarch64_restart_one(target
, RESTART_SYNC
);
878 if (retval
!= ERROR_OK
)
882 int64_t then
= timeval_ms();
884 struct target
*curr
= target
;
885 struct target_list
*head
;
886 bool all_resumed
= true;
888 foreach_smp_target(head
, target
->head
) {
895 if (!target_was_examined(curr
))
898 retval
= aarch64_check_state_one(curr
,
899 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
900 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
905 if (curr
->state
!= TARGET_RUNNING
) {
906 curr
->state
= TARGET_RUNNING
;
907 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
908 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
915 if (timeval_ms() > then
+ 1000) {
916 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__
, target_name(curr
));
917 retval
= ERROR_TARGET_TIMEOUT
;
922 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
923 * and it looks like the CTI's are not connected by a common
924 * trigger matrix. It seems that we need to halt one core in each
925 * cluster explicitly. So if we find that a core has not halted
926 * yet, we trigger an explicit resume for the second cluster.
928 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
929 if (retval
!= ERROR_OK
)
934 if (retval
!= ERROR_OK
)
937 target
->debug_reason
= DBG_REASON_NOTHALTED
;
939 if (!debug_execution
) {
940 target
->state
= TARGET_RUNNING
;
941 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
942 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
944 target
->state
= TARGET_DEBUG_RUNNING
;
945 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
946 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
952 static int aarch64_debug_entry(struct target
*target
)
954 int retval
= ERROR_OK
;
955 struct armv8_common
*armv8
= target_to_armv8(target
);
956 struct arm_dpm
*dpm
= &armv8
->dpm
;
957 enum arm_state core_state
;
960 /* make sure to clear all sticky errors */
961 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
962 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
963 if (retval
== ERROR_OK
)
964 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
965 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
966 if (retval
== ERROR_OK
)
967 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
969 if (retval
!= ERROR_OK
)
972 LOG_DEBUG("%s dscr = 0x%08" PRIx32
, target_name(target
), dscr
);
975 core_state
= armv8_dpm_get_core_state(dpm
);
976 armv8_select_opcodes(armv8
, core_state
== ARM_STATE_AARCH64
);
977 armv8_select_reg_access(armv8
, core_state
== ARM_STATE_AARCH64
);
979 /* close the CTI gate for all events */
980 if (retval
== ERROR_OK
)
981 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
982 /* discard async exceptions */
983 if (retval
== ERROR_OK
)
984 retval
= dpm
->instr_cpsr_sync(dpm
);
985 if (retval
!= ERROR_OK
)
988 /* Examine debug reason */
989 armv8_dpm_report_dscr(dpm
, dscr
);
991 /* save address of instruction that triggered the watchpoint? */
992 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
996 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
997 armv8
->debug_base
+ CPUV8_DBG_WFAR1
,
999 if (retval
!= ERROR_OK
)
1002 wfar
= (wfar
<< 32);
1003 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1004 armv8
->debug_base
+ CPUV8_DBG_WFAR0
,
1006 if (retval
!= ERROR_OK
)
1009 armv8_dpm_report_wfar(&armv8
->dpm
, wfar
);
1012 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
1014 if (retval
== ERROR_OK
&& armv8
->post_debug_entry
)
1015 retval
= armv8
->post_debug_entry(target
);
1020 static int aarch64_post_debug_entry(struct target
*target
)
1022 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1023 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1025 enum arm_mode target_mode
= ARM_MODE_ANY
;
1028 switch (armv8
->arm
.core_mode
) {
1030 target_mode
= ARMV8_64_EL1H
;
1034 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL1
, 0);
1038 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL2
, 0);
1042 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL3
, 0);
1051 instr
= ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1055 LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
1056 armv8_mode_name(armv8
->arm
.core_mode
), armv8
->arm
.core_mode
);
1060 if (target_mode
!= ARM_MODE_ANY
)
1061 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
1063 retval
= armv8
->dpm
.instr_read_data_r0(&armv8
->dpm
, instr
, &aarch64
->system_control_reg
);
1064 if (retval
!= ERROR_OK
)
1067 if (target_mode
!= ARM_MODE_ANY
)
1068 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
1070 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
1071 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
1073 if (armv8
->armv8_mmu
.armv8_cache
.info
== -1) {
1074 armv8_identify_cache(armv8
);
1075 armv8_read_mpidr(armv8
);
1078 armv8
->armv8_mmu
.mmu_enabled
=
1079 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
1080 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
1081 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
1082 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
1083 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
1088 * single-step a target
1090 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
1091 int handle_breakpoints
)
1093 struct armv8_common
*armv8
= target_to_armv8(target
);
1094 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1095 int saved_retval
= ERROR_OK
;
1099 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_STEP
;
1101 if (target
->state
!= TARGET_HALTED
) {
1102 LOG_WARNING("target not halted");
1103 return ERROR_TARGET_NOT_HALTED
;
1106 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1107 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
1108 /* make sure EDECR.SS is not set when restoring the register */
1110 if (retval
== ERROR_OK
) {
1112 /* set EDECR.SS to enter hardware step mode */
1113 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1114 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (edecr
|0x4));
1116 /* disable interrupts while stepping */
1117 if (retval
== ERROR_OK
&& aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
)
1118 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0x3 << 22);
1119 /* bail out if stepping setup has failed */
1120 if (retval
!= ERROR_OK
)
1123 if (target
->smp
&& (current
== 1)) {
1125 * isolate current target so that it doesn't get resumed
1126 * together with the others
1128 retval
= arm_cti_gate_channel(armv8
->cti
, 1);
1129 /* resume all other targets in the group */
1130 if (retval
== ERROR_OK
)
1131 retval
= aarch64_step_restart_smp(target
);
1132 if (retval
!= ERROR_OK
) {
1133 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1136 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1139 /* all other targets running, restore and restart the current target */
1140 retval
= aarch64_restore_one(target
, current
, &address
, 0, 0);
1141 if (retval
== ERROR_OK
)
1142 retval
= aarch64_restart_one(target
, RESTART_LAZY
);
1144 if (retval
!= ERROR_OK
)
1147 LOG_DEBUG("target step-resumed at 0x%" PRIx64
, address
);
1148 if (!handle_breakpoints
)
1149 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1151 int64_t then
= timeval_ms();
1156 retval
= aarch64_check_state_one(target
,
1157 PRSR_SDR
|PRSR_HALT
, PRSR_SDR
|PRSR_HALT
, &stepped
, &prsr
);
1158 if (retval
!= ERROR_OK
|| stepped
)
1161 if (timeval_ms() > then
+ 100) {
1162 LOG_ERROR("timeout waiting for target %s halt after step",
1163 target_name(target
));
1164 retval
= ERROR_TARGET_TIMEOUT
;
1170 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1171 * causes a timeout. The core takes the step but doesn't complete it and so
1172 * debug state is never entered. However, you can manually halt the core
1173 * as an external debug even is also a WFI wakeup event.
1175 if (retval
== ERROR_TARGET_TIMEOUT
)
1176 saved_retval
= aarch64_halt_one(target
, HALT_SYNC
);
1179 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1180 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
1181 if (retval
!= ERROR_OK
)
1184 /* restore interrupts */
1185 if (aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
) {
1186 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0);
1187 if (retval
!= ERROR_OK
)
1191 if (saved_retval
!= ERROR_OK
)
1192 return saved_retval
;
1197 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
1199 struct armv8_common
*armv8
= target_to_armv8(target
);
1200 struct arm
*arm
= &armv8
->arm
;
1204 LOG_DEBUG("%s", target_name(target
));
1206 if (armv8
->pre_restore_context
)
1207 armv8
->pre_restore_context(target
);
1209 retval
= armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
1210 if (retval
== ERROR_OK
) {
1211 /* registers are now invalid */
1212 register_cache_invalidate(arm
->core_cache
);
1213 register_cache_invalidate(arm
->core_cache
->next
);
1220 * Cortex-A8 Breakpoint and watchpoint functions
1223 /* Setup hardware Breakpoint Register Pair */
1224 static int aarch64_set_breakpoint(struct target
*target
,
1225 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1230 uint8_t byte_addr_select
= 0x0F;
1231 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1232 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1233 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1235 if (breakpoint
->set
) {
1236 LOG_WARNING("breakpoint already set");
1240 if (breakpoint
->type
== BKPT_HARD
) {
1242 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
1244 if (brp_i
>= aarch64
->brp_num
) {
1245 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1246 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1248 breakpoint
->set
= brp_i
+ 1;
1249 if (breakpoint
->length
== 2)
1250 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1251 control
= ((matchmode
& 0x7) << 20)
1253 | (byte_addr_select
<< 5)
1255 brp_list
[brp_i
].used
= 1;
1256 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1257 brp_list
[brp_i
].control
= control
;
1258 bpt_value
= brp_list
[brp_i
].value
;
1260 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1261 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1262 (uint32_t)(bpt_value
& 0xFFFFFFFF));
1263 if (retval
!= ERROR_OK
)
1265 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1266 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1267 (uint32_t)(bpt_value
>> 32));
1268 if (retval
!= ERROR_OK
)
1271 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1272 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1273 brp_list
[brp_i
].control
);
1274 if (retval
!= ERROR_OK
)
1276 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1277 brp_list
[brp_i
].control
,
1278 brp_list
[brp_i
].value
);
1280 } else if (breakpoint
->type
== BKPT_SOFT
) {
1284 if (armv8_dpm_get_core_state(&armv8
->dpm
) == ARM_STATE_AARCH64
) {
1285 opcode
= ARMV8_HLT(11);
1287 if (breakpoint
->length
!= 4)
1288 LOG_ERROR("bug: breakpoint length should be 4 in AArch64 mode");
1291 * core_state is ARM_STATE_ARM
1292 * in that case the opcode depends on breakpoint length:
1293 * - if length == 4 => A32 opcode
1294 * - if length == 2 => T32 opcode
1295 * - if length == 3 => T32 opcode (refer to gdb doc : ARM-Breakpoint-Kinds)
1296 * in that case the length should be changed from 3 to 4 bytes
1298 opcode
= (breakpoint
->length
== 4) ? ARMV8_HLT_A1(11) :
1299 (uint32_t) (ARMV8_HLT_T1(11) | ARMV8_HLT_T1(11) << 16);
1301 if (breakpoint
->length
== 3)
1302 breakpoint
->length
= 4;
1305 buf_set_u32(code
, 0, 32, opcode
);
1307 retval
= target_read_memory(target
,
1308 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1309 breakpoint
->length
, 1,
1310 breakpoint
->orig_instr
);
1311 if (retval
!= ERROR_OK
)
1314 armv8_cache_d_inner_flush_virt(armv8
,
1315 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1316 breakpoint
->length
);
1318 retval
= target_write_memory(target
,
1319 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1320 breakpoint
->length
, 1, code
);
1321 if (retval
!= ERROR_OK
)
1324 armv8_cache_d_inner_flush_virt(armv8
,
1325 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1326 breakpoint
->length
);
1328 armv8_cache_i_inner_inval_virt(armv8
,
1329 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1330 breakpoint
->length
);
1332 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1335 /* Ensure that halting debug mode is enable */
1336 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
1337 if (retval
!= ERROR_OK
) {
1338 LOG_DEBUG("Failed to set DSCR.HDE");
/*
 * Program a context-ID (ASID-matching) hardware breakpoint.
 *
 * Claims the first free breakpoint register pair of type BRP_CONTEXT,
 * writes the ASID into BVR and the match configuration into BCR via the
 * APB-AP, and records the claim in aarch64->brp_list.
 *
 * @param target     target to program
 * @param breakpoint breakpoint whose ->asid selects the context to match
 * @param matchmode  BCR.BT match-mode field (low 3 bits used)
 * @return ERROR_OK on success, ERROR_FAIL / DAP error code otherwise
 */
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval = ERROR_FAIL;
	int brp_i = 0;
	uint32_t control;
	/* all four byte lanes enabled in BCR.BAS */
	uint8_t byte_addr_select = 0x0F;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/*check available context BRPs*/
	while ((brp_list[brp_i].used ||
		(brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
		brp_i++;

	if (brp_i >= aarch64->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	/* ->set stores index+1 so 0 can mean "not set" */
	breakpoint->set = brp_i + 1;
	control = ((matchmode & 0x7) << 20)
		| (byte_addr_select << 5)
		| (3 << 1) | 1;	/* EL1/EL0 access, enable */
	brp_list[brp_i].used = 1;
	brp_list[brp_i].value = (breakpoint->asid);
	brp_list[brp_i].control = control;
	/* each BRP occupies a 16-byte stride in the BVR/BCR register files */
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
			brp_list[brp_i].value);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
			brp_list[brp_i].control);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
		brp_list[brp_i].control,
		brp_list[brp_i].value);

	return ERROR_OK;
}
1395 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1397 int retval
= ERROR_FAIL
;
1398 int brp_1
= 0; /* holds the contextID pair */
1399 int brp_2
= 0; /* holds the IVA pair */
1400 uint32_t control_CTX
, control_IVA
;
1401 uint8_t CTX_byte_addr_select
= 0x0F;
1402 uint8_t IVA_byte_addr_select
= 0x0F;
1403 uint8_t CTX_machmode
= 0x03;
1404 uint8_t IVA_machmode
= 0x01;
1405 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1406 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1407 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1409 if (breakpoint
->set
) {
1410 LOG_WARNING("breakpoint already set");
1413 /*check available context BRPs*/
1414 while ((brp_list
[brp_1
].used
||
1415 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
1418 printf("brp(CTX) found num: %d\n", brp_1
);
1419 if (brp_1
>= aarch64
->brp_num
) {
1420 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1424 while ((brp_list
[brp_2
].used
||
1425 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
1428 printf("brp(IVA) found num: %d\n", brp_2
);
1429 if (brp_2
>= aarch64
->brp_num
) {
1430 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1434 breakpoint
->set
= brp_1
+ 1;
1435 breakpoint
->linked_BRP
= brp_2
;
1436 control_CTX
= ((CTX_machmode
& 0x7) << 20)
1439 | (CTX_byte_addr_select
<< 5)
1441 brp_list
[brp_1
].used
= 1;
1442 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1443 brp_list
[brp_1
].control
= control_CTX
;
1444 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1445 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1446 brp_list
[brp_1
].value
);
1447 if (retval
!= ERROR_OK
)
1449 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1450 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1451 brp_list
[brp_1
].control
);
1452 if (retval
!= ERROR_OK
)
1455 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1458 | (IVA_byte_addr_select
<< 5)
1460 brp_list
[brp_2
].used
= 1;
1461 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1462 brp_list
[brp_2
].control
= control_IVA
;
1463 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1464 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1465 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1466 if (retval
!= ERROR_OK
)
1468 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1469 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].BRPn
,
1470 brp_list
[brp_2
].value
>> 32);
1471 if (retval
!= ERROR_OK
)
1473 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1474 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1475 brp_list
[brp_2
].control
);
1476 if (retval
!= ERROR_OK
)
/*
 * Remove a previously-set breakpoint from the hardware / memory.
 *
 * For BKPT_HARD: clears and disables the BRP(s) backing the breakpoint.
 * A hybrid breakpoint (address != 0 && asid != 0) owns two linked pairs
 * (->set-1 is the context pair, ->linked_BRP the IVA pair) and both are
 * torn down.  For BKPT_SOFT: restores the saved original instruction
 * and maintains D/I-cache coherency around the write.
 *
 * @return ERROR_OK (also for the "not set" no-op), or a DAP/memory
 *         error code from the register or memory writes
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* hybrid breakpoints carry both an address and an ASID */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;	/* context pair */
			int brp_j = breakpoint->linked_BRP;	/* IVA pair */
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* disable first (BCR), then clear both BVR words */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;
		} else {
			/* plain IVA breakpoint: single pair at ->set-1 */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		/* flush D-cache so we read back the real instruction memory */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* push the restored instruction to memory and drop stale I-cache */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1615 static int aarch64_add_breakpoint(struct target
*target
,
1616 struct breakpoint
*breakpoint
)
1618 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1620 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1621 LOG_INFO("no hardware breakpoint available");
1622 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1625 if (breakpoint
->type
== BKPT_HARD
)
1626 aarch64
->brp_num_available
--;
1628 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1631 static int aarch64_add_context_breakpoint(struct target
*target
,
1632 struct breakpoint
*breakpoint
)
1634 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1636 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1637 LOG_INFO("no hardware breakpoint available");
1638 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1641 if (breakpoint
->type
== BKPT_HARD
)
1642 aarch64
->brp_num_available
--;
1644 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1647 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1648 struct breakpoint
*breakpoint
)
1650 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1652 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1653 LOG_INFO("no hardware breakpoint available");
1654 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1657 if (breakpoint
->type
== BKPT_HARD
)
1658 aarch64
->brp_num_available
--;
1660 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
/*
 * target_type "remove_breakpoint" hook: tear down the breakpoint and,
 * for hardware breakpoints, return the register pair to the free pool.
 *
 * NOTE(review): the comment below claims removal while running is fine,
 * yet the code still rejects a non-halted target — confirm whether the
 * halted check is intentionally kept (or was meant to be compiled out).
 */
static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

/* It is perfectly possible to remove breakpoints while the target is running */
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (breakpoint->set) {
		aarch64_unset_breakpoint(target, breakpoint);
		if (breakpoint->type == BKPT_HARD)
			aarch64->brp_num_available++;	/* release the reserved BRP */
	}

	return ERROR_OK;
}
/* Setup hardware Watchpoint Register Pair */
/*
 * Claims the first free WRP, builds the WCR control word (access type,
 * privilege, byte-address-select mask) and programs WVR/WCR via the
 * APB-AP.  Matching is restricted to a single 8-byte-aligned window;
 * a watchpoint crossing that window is truncated with a warning.
 *
 * @return ERROR_OK on success, ERROR_TARGET_RESOURCE_NOT_AVAILABLE if
 *         no WRP is free, or a DAP error code
 */
static int aarch64_set_watchpoint(struct target *target,
	struct watchpoint *watchpoint)
{
	int retval;
	int wp_i = 0;
	uint32_t control, offset, length;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *wp_list = aarch64->wp_list;

	if (watchpoint->set) {
		LOG_WARNING("watchpoint already set");
		return ERROR_OK;
	}

	/* find a free watchpoint register pair */
	while (wp_list[wp_i].used && (wp_i < aarch64->wp_num))
		wp_i++;
	if (wp_i >= aarch64->wp_num) {
		LOG_ERROR("ERROR Can not find free Watchpoint Register Pair");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	control = (1 << 0)	/* enable */
		| (3 << 1)	/* both user and privileged access */
		| (1 << 13);	/* higher mode control */

	/* WCR.LSC load/store match selection */
	switch (watchpoint->rw) {
	case WPT_READ:
		control |= 1 << 3;
		break;
	case WPT_WRITE:
		control |= 2 << 3;
		break;
	case WPT_ACCESS:
		control |= 3 << 3;
		break;
	}

	/* Match up to 8 bytes. */
	offset = watchpoint->address & 7;
	length = watchpoint->length;
	if (offset + length > sizeof(uint64_t)) {
		length = sizeof(uint64_t) - offset;
		LOG_WARNING("Adjust watchpoint match inside 8-byte boundary");
	}
	/* one BAS bit per byte lane inside the doubleword */
	for (; length > 0; offset++, length--)
		control |= (1 << offset) << 5;

	wp_list[wp_i].value = watchpoint->address & 0xFFFFFFFFFFFFFFF8ULL;
	wp_list[wp_i].control = control;

	/* 64-bit WVR written as two 32-bit halves; 16-byte register stride */
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].BRPn,
			(uint32_t)(wp_list[wp_i].value & 0xFFFFFFFF));
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].BRPn,
			(uint32_t)(wp_list[wp_i].value >> 32));
	if (retval != ERROR_OK)
		return retval;

	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].BRPn,
			control);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, wp_i,
		wp_list[wp_i].control, wp_list[wp_i].value);

	/* Ensure that halting debug mode is enable */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Failed to set DSCR.HDE");
		return retval;
	}

	wp_list[wp_i].used = 1;
	watchpoint->set = wp_i + 1;	/* index+1 so 0 means "not set" */

	return ERROR_OK;
}
/* Clear hardware Watchpoint Register Pair */
/*
 * Disables the WRP backing the watchpoint (WCR first, then both WVR
 * words), clears the bookkeeping in aarch64->wp_list and marks the
 * watchpoint unset.
 *
 * @return ERROR_OK (also for "not set"/invalid-index no-ops) or a DAP
 *         error code from the register writes
 */
static int aarch64_unset_watchpoint(struct target *target,
	struct watchpoint *watchpoint)
{
	int retval, wp_i;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *wp_list = aarch64->wp_list;

	if (!watchpoint->set) {
		LOG_WARNING("watchpoint not set");
		return ERROR_OK;
	}

	wp_i = watchpoint->set - 1;	/* ->set stores index+1 */
	if ((wp_i < 0) || (wp_i >= aarch64->wp_num)) {
		LOG_DEBUG("Invalid WP number in watchpoint");
		return ERROR_OK;
	}
	LOG_DEBUG("rwp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, wp_i,
		wp_list[wp_i].control, wp_list[wp_i].value);
	wp_list[wp_i].used = 0;
	wp_list[wp_i].value = 0;
	wp_list[wp_i].control = 0;
	/* disable first (WCR), then clear both WVR words */
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].BRPn,
			wp_list[wp_i].control);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].BRPn,
			wp_list[wp_i].value);
	if (retval != ERROR_OK)
		return retval;

	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].BRPn,
			(uint32_t)wp_list[wp_i].value);
	if (retval != ERROR_OK)
		return retval;
	watchpoint->set = 0;

	return ERROR_OK;
}
1813 static int aarch64_add_watchpoint(struct target
*target
,
1814 struct watchpoint
*watchpoint
)
1817 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1819 if (aarch64
->wp_num_available
< 1) {
1820 LOG_INFO("no hardware watchpoint available");
1821 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1824 retval
= aarch64_set_watchpoint(target
, watchpoint
);
1825 if (retval
== ERROR_OK
)
1826 aarch64
->wp_num_available
--;
1831 static int aarch64_remove_watchpoint(struct target
*target
,
1832 struct watchpoint
*watchpoint
)
1834 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1836 if (watchpoint
->set
) {
1837 aarch64_unset_watchpoint(target
, watchpoint
);
1838 aarch64
->wp_num_available
++;
/*
 * find out which watchpoint hits
 * get exception address and compare the address to watchpoints
 */
int aarch64_hit_watchpoint(struct target *target,
	struct watchpoint **hit_watchpoint)
{
	if (target->debug_reason != DBG_REASON_WATCHPOINT)
		return ERROR_FAIL;

	struct armv8_common *armv8 = target_to_armv8(target);

	uint64_t exception_address;
	struct watchpoint *wp;

	/* dpm.wp_pc holds the data address reported by EDWAR */
	exception_address = armv8->dpm.wp_pc;

	if (exception_address == 0xFFFFFFFF)
		return ERROR_FAIL;

	/**********************************************************/
	/* see if a watchpoint address matches a value read from  */
	/* the EDWAR register. Testing shows that on some ARM CPUs*/
	/* the EDWAR value needs to have 8 added to it so we add  */
	/* that check as well not sure if that is a core bug)     */
	/**********************************************************/
	for (exception_address = armv8->dpm.wp_pc; exception_address <= (armv8->dpm.wp_pc + 8);
		exception_address += 8) {
		for (wp = target->watchpoints; wp; wp = wp->next) {
			if ((exception_address >= wp->address) && (exception_address < (wp->address + wp->length))) {
				*hit_watchpoint = wp;
				if (exception_address != armv8->dpm.wp_pc)
					LOG_DEBUG("watchpoint hit required EDWAR to be increased by 8");
				return ERROR_OK;
			}
		}
	}

	/* no watchpoint covered the reported address */
	return ERROR_FAIL;
}
1886 * Cortex-A8 Reset functions
1889 static int aarch64_enable_reset_catch(struct target
*target
, bool enable
)
1891 struct armv8_common
*armv8
= target_to_armv8(target
);
1895 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1896 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
1897 LOG_DEBUG("EDECR = 0x%08" PRIx32
", enable=%d", edecr
, enable
);
1898 if (retval
!= ERROR_OK
)
1906 return mem_ap_write_atomic_u32(armv8
->debug_ap
,
1907 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
/*
 * Acknowledge a pending Reset Catch debug event: read EDESR, report
 * whether the event actually fired, and clear the sticky flag if so.
 *
 * @return ERROR_OK, or a debug-AP access error code
 */
static int aarch64_clear_reset_catch(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	uint32_t edesr;
	int retval;
	bool was_triggered;

	/* check if Reset Catch debug event triggered as expected */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_EDESR, &edesr);
	if (retval != ERROR_OK)
		return retval;

	was_triggered = !!(edesr & ESR_RC);
	LOG_DEBUG("Reset Catch debug event %s",
		was_triggered ? "triggered" : "NOT triggered!");

	if (was_triggered) {
		/* clear pending Reset Catch debug event */
		edesr &= ~ESR_RC;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDESR, edesr);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
/*
 * target_type "assert_reset" hook.
 *
 * Prefers a user-supplied RESET_ASSERT event handler; otherwise drives
 * SRST.  When a reset-halt is requested on an examined target, a Reset
 * Catch debug event is armed first (asserting SRST early if the adapter
 * cannot gate it), so the core halts straight out of reset.  Register
 * caches are invalidated and the target state set to TARGET_RESET.
 */
static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	enum reset_types reset_config = jtag_get_reset_config();
	int retval;

	LOG_DEBUG(" ");

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (reset_config & RESET_HAS_SRST) {
		bool srst_asserted = false;

		if (target->reset_halt) {
			if (target_was_examined(target)) {

				if (reset_config & RESET_SRST_NO_GATING) {
					/*
					 * SRST needs to be asserted *before* Reset Catch
					 * debug event can be set up.
					 */
					adapter_assert_reset();
					srst_asserted = true;

					/* make sure to clear all sticky errors */
					mem_ap_write_atomic_u32(armv8->debug_ap,
							armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
				}

				/* set up Reset Catch debug event to halt the CPU after reset */
				retval = aarch64_enable_reset_catch(target, true);
				if (retval != ERROR_OK)
					LOG_WARNING("%s: Error enabling Reset Catch debug event; the CPU will not halt immediately after reset!",
							target_name(target));
			} else {
				LOG_WARNING("%s: Target not examined, will not halt immediately after reset!",
						target_name(target));
			}
		}

		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		if (!srst_asserted)
			adapter_assert_reset();
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target)) {
		register_cache_invalidate(armv8->arm.core_cache);
		/* also the second (AArch32/AArch64 counterpart) cache */
		register_cache_invalidate(armv8->arm.core_cache->next);
	}

	target->state = TARGET_RESET;

	return ERROR_OK;
}
/*
 * target_type "deassert_reset" hook.
 *
 * Releases SRST, re-initializes debug access and polls the target.
 * For reset-halt requests, acknowledges and disarms the Reset Catch
 * event, and falls back to an explicit halt if the core managed to run.
 */
static int aarch64_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	adapter_deassert_reset();

	if (!target_was_examined(target))
		return ERROR_OK;

	retval = aarch64_init_debug_access(target);
	if (retval != ERROR_OK)
		return retval;

	retval = aarch64_poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		/* clear pending Reset Catch debug event */
		retval = aarch64_clear_reset_catch(target);
		if (retval != ERROR_OK)
			LOG_WARNING("%s: Clearing Reset Catch debug event failed",
					target_name(target));

		/* disable Reset Catch debug event */
		retval = aarch64_enable_reset_catch(target, false);
		if (retval != ERROR_OK)
			LOG_WARNING("%s: Disabling Reset Catch debug event failed",
					target_name(target));

		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
					target_name(target));
			/* Reset Catch did not hold the core: halt it now */
			retval = target_halt(target);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return ERROR_OK;
}
/*
 * Byte/halfword/word write path through the DCC in Normal mode:
 * for each unit, push the data into DTRRX, move it into R1/X1 on the
 * core, then execute a post-incrementing store through R0/X0.
 * Slow (one DCC round-trip per element) but handles any size/alignment.
 *
 * @param size   element size in bytes (1, 2 or 4)
 * @param dscr   in/out cached DSCR value; memory-access mode is
 *               switched off here if it was left on
 */
static int aarch64_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

	/* X1/R1 is clobbered as the data register; force restore on resume */
	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode if necessary */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t data, opcode;

		/* write the data to store into DTRRX */
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* move DTRRX into R1/X1 on the core */
		if (arm->core_state == ARM_STATE_AARCH64)
			retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		/* store R1/X1 to [R0/X0], post-incrementing the address */
		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
/*
 * Word-aligned bulk write path: switch the DCC into memory-access mode
 * (DSCR.MA) so every DTRRX write is automatically stored to [X0] with
 * post-increment, stream the whole buffer, then switch back to Normal
 * mode.  Requires size == 4 and a word-aligned start address (caller
 * enforces this).
 *
 * @param dscr in/out cached DSCR value, updated to the mode left active
 */
static int aarch64_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;

	/* memory-access mode clobbers X1/R1; force restore on resume */
	armv8_reg_current(arm, 1)->dirty = true;

	/* Step 1.d - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 2.a - Do the write */
	retval = mem_ap_write_buf_noincr(armv8->debug_ap,
			buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.a - Switch DTR mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
/*
 * Top-level CPU-side memory write through the APB-AP and the DCC.
 *
 * Loads the start address into X0/R0 on the core, then dispatches to
 * the fast (word-aligned, DSCR.MA streaming) or slow (per-element)
 * path.  Afterwards the sticky DSCR abort flags are checked and any
 * abort is converted into an exception-handling path plus ERROR_FAIL.
 *
 * @param address target address to write to
 * @param size    element size in bytes (1, 2 or 4)
 * @param count   number of elements
 * @param buffer  source data (target endianness)
 */
static int aarch64_write_cpu_memory(struct target *target,
	uint64_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* write memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c   - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRX */
		/* Step 1.c   - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* fast path only for word writes at word-aligned addresses */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);

	if (retval != ERROR_OK) {
		/* Unset DTR mode */
		mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
/*
 * Byte/halfword/word read path through the DCC in Normal mode:
 * for each unit, execute a post-incrementing load into R1/X1, move it
 * to DTRTX, then read DTRTX over the debug AP.  Slow (one round-trip
 * per element) but handles any size/alignment.
 *
 * @param size element size in bytes (1, 2 or 4)
 * @param dscr in/out cached DSCR value; memory-access mode is switched
 *             off here if it was left on
 */
static int aarch64_read_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

	/* X1/R1 is clobbered as the data register; force restore on resume */
	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode (if necessary) */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t opcode, data;

		/* load from [R0/X0] into R1/X1, post-incrementing the address */
		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		/* move R1/X1 into DTRTX so the debugger can read it */
		if (arm->core_state == ARM_STATE_AARCH64)
			retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &data);
		if (retval != ERROR_OK)
			return retval;

		/* store in target endianness */
		if (size == 1)
			*buffer = (uint8_t)data;
		else if (size == 2)
			target_buffer_set_u16(target, buffer, (uint16_t)data);
		else
			target_buffer_set_u32(target, buffer, data);

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
/*
 * Word-aligned bulk read path using DSCR.MA memory-access mode: each
 * DTRTX read re-issues the load and post-increments X0, so the whole
 * buffer is streamed with noincr AP reads.  The first DTRTX value after
 * entering MA mode is a dummy and is discarded; the final word is
 * fetched after leaving MA mode so no extra load is triggered.
 *
 * @param dscr in/out cached DSCR value, updated to the mode left active
 */
static int aarch64_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t value;

	/* Mark X1 as dirty */
	armv8_reg_current(arm, 1)->dirty = true;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
	} else {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
	}

	if (retval != ERROR_OK)
		return retval;

	/* Step 1.e - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 1.f - read DBGDTRTX and discard the value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	count--;
	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	if (count) {
		/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
		 * increments X0 by 4. */
		retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
				armv8->debug_base + CPUV8_DBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Step 3.a - set DTR access mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	target_buffer_set_u32(target, buffer + count * 4, value);

	return retval;
}
/*
 * Top-level CPU-side memory read through the APB-AP and the DCC.
 *
 * Loads the start address into X0/R0 on the core, dispatches to the
 * fast (word-aligned, DSCR.MA streaming) or slow (per-element) path,
 * restores Normal DCC mode if needed, and finally checks the sticky
 * DSCR abort flags, converting an abort into exception handling plus
 * ERROR_FAIL.
 *
 * @param address target address to read from
 * @param size    element size in bytes (1, 2 or 4)
 * @param count   number of elements
 * @param buffer  destination (filled in target endianness)
 */
static int aarch64_read_cpu_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
			address, size, count);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c   - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c   - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* fast path only for word reads at word-aligned addresses */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);

	if (dscr & DSCR_MA) {
		/* best-effort: drop memory-access mode before the abort check */
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval != ERROR_OK)
		return retval;

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2444 static int aarch64_read_phys_memory(struct target
*target
,
2445 target_addr_t address
, uint32_t size
,
2446 uint32_t count
, uint8_t *buffer
)
2448 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2450 if (count
&& buffer
) {
2451 /* read memory through APB-AP */
2452 retval
= aarch64_mmu_modify(target
, 0);
2453 if (retval
!= ERROR_OK
)
2455 retval
= aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2460 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
2461 uint32_t size
, uint32_t count
, uint8_t *buffer
)
2463 int mmu_enabled
= 0;
2466 /* determine if MMU was enabled on target stop */
2467 retval
= aarch64_mmu(target
, &mmu_enabled
);
2468 if (retval
!= ERROR_OK
)
2472 /* enable MMU as we could have disabled it for phys access */
2473 retval
= aarch64_mmu_modify(target
, 1);
2474 if (retval
!= ERROR_OK
)
2477 return aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2480 static int aarch64_write_phys_memory(struct target
*target
,
2481 target_addr_t address
, uint32_t size
,
2482 uint32_t count
, const uint8_t *buffer
)
2484 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2486 if (count
&& buffer
) {
2487 /* write memory through APB-AP */
2488 retval
= aarch64_mmu_modify(target
, 0);
2489 if (retval
!= ERROR_OK
)
2491 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2497 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
2498 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
2500 int mmu_enabled
= 0;
2503 /* determine if MMU was enabled on target stop */
2504 retval
= aarch64_mmu(target
, &mmu_enabled
);
2505 if (retval
!= ERROR_OK
)
2509 /* enable MMU as we could have disabled it for phys access */
2510 retval
= aarch64_mmu_modify(target
, 1);
2511 if (retval
!= ERROR_OK
)
2514 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2517 static int aarch64_handle_target_request(void *priv
)
2519 struct target
*target
= priv
;
2520 struct armv8_common
*armv8
= target_to_armv8(target
);
2523 if (!target_was_examined(target
))
2525 if (!target
->dbg_msg_enabled
)
2528 if (target
->state
== TARGET_RUNNING
) {
2531 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2532 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2534 /* check if we have data */
2535 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
2536 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2537 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
2538 if (retval
== ERROR_OK
) {
2539 target_request(target
, request
);
2540 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2541 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2549 static int aarch64_examine_first(struct target
*target
)
2551 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2552 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2553 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2554 struct aarch64_private_config
*pc
= target
->private_config
;
2556 int retval
= ERROR_OK
;
2557 uint64_t debug
, ttypr
;
2559 uint32_t tmp0
, tmp1
, tmp2
, tmp3
;
2560 debug
= ttypr
= cpuid
= 0;
2565 if (pc
->adiv5_config
.ap_num
== DP_APSEL_INVALID
) {
2566 /* Search for the APB-AB */
2567 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
2568 if (retval
!= ERROR_OK
) {
2569 LOG_ERROR("Could not find APB-AP for debug access");
2573 armv8
->debug_ap
= dap_ap(swjdp
, pc
->adiv5_config
.ap_num
);
2576 retval
= mem_ap_init(armv8
->debug_ap
);
2577 if (retval
!= ERROR_OK
) {
2578 LOG_ERROR("Could not initialize the APB-AP");
2582 armv8
->debug_ap
->memaccess_tck
= 10;
2584 if (!target
->dbgbase_set
) {
2586 /* Get ROM Table base */
2588 int32_t coreidx
= target
->coreid
;
2589 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
2590 if (retval
!= ERROR_OK
)
2592 /* Lookup 0x15 -- Processor DAP */
2593 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
2594 &armv8
->debug_base
, &coreidx
);
2595 if (retval
!= ERROR_OK
)
2597 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
2598 " apid: %08" PRIx32
, coreidx
, armv8
->debug_base
, apid
);
2600 armv8
->debug_base
= target
->dbgbase
;
2602 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2603 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
2604 if (retval
!= ERROR_OK
) {
2605 LOG_DEBUG("Examine %s failed", "oslock");
2609 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2610 armv8
->debug_base
+ CPUV8_DBG_MAINID0
, &cpuid
);
2611 if (retval
!= ERROR_OK
) {
2612 LOG_DEBUG("Examine %s failed", "CPUID");
2616 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2617 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
, &tmp0
);
2618 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2619 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
+ 4, &tmp1
);
2620 if (retval
!= ERROR_OK
) {
2621 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2624 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2625 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
, &tmp2
);
2626 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2627 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
+ 4, &tmp3
);
2628 if (retval
!= ERROR_OK
) {
2629 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2633 retval
= dap_run(armv8
->debug_ap
->dap
);
2634 if (retval
!= ERROR_OK
) {
2635 LOG_ERROR("%s: examination failed\n", target_name(target
));
2640 ttypr
= (ttypr
<< 32) | tmp0
;
2642 debug
= (debug
<< 32) | tmp2
;
2644 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
2645 LOG_DEBUG("ttypr = 0x%08" PRIx64
, ttypr
);
2646 LOG_DEBUG("debug = 0x%08" PRIx64
, debug
);
2648 if (pc
->cti
== NULL
)
2651 armv8
->cti
= pc
->cti
;
2653 retval
= aarch64_dpm_setup(aarch64
, debug
);
2654 if (retval
!= ERROR_OK
)
2657 /* Setup Breakpoint Register Pairs */
2658 aarch64
->brp_num
= (uint32_t)((debug
>> 12) & 0x0F) + 1;
2659 aarch64
->brp_num_context
= (uint32_t)((debug
>> 28) & 0x0F) + 1;
2660 aarch64
->brp_num_available
= aarch64
->brp_num
;
2661 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
2662 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
2663 aarch64
->brp_list
[i
].used
= 0;
2664 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
2665 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
2667 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
2668 aarch64
->brp_list
[i
].value
= 0;
2669 aarch64
->brp_list
[i
].control
= 0;
2670 aarch64
->brp_list
[i
].BRPn
= i
;
2673 /* Setup Watchpoint Register Pairs */
2674 aarch64
->wp_num
= (uint32_t)((debug
>> 20) & 0x0F) + 1;
2675 aarch64
->wp_num_available
= aarch64
->wp_num
;
2676 aarch64
->wp_list
= calloc(aarch64
->wp_num
, sizeof(struct aarch64_brp
));
2677 for (i
= 0; i
< aarch64
->wp_num
; i
++) {
2678 aarch64
->wp_list
[i
].used
= 0;
2679 aarch64
->wp_list
[i
].type
= BRP_NORMAL
;
2680 aarch64
->wp_list
[i
].value
= 0;
2681 aarch64
->wp_list
[i
].control
= 0;
2682 aarch64
->wp_list
[i
].BRPn
= i
;
2685 LOG_DEBUG("Configured %i hw breakpoints, %i watchpoints",
2686 aarch64
->brp_num
, aarch64
->wp_num
);
2688 target
->state
= TARGET_UNKNOWN
;
2689 target
->debug_reason
= DBG_REASON_NOTHALTED
;
2690 aarch64
->isrmasking_mode
= AARCH64_ISRMASK_ON
;
2691 target_set_examined(target
);
2695 static int aarch64_examine(struct target
*target
)
2697 int retval
= ERROR_OK
;
2699 /* don't re-probe hardware after each reset */
2700 if (!target_was_examined(target
))
2701 retval
= aarch64_examine_first(target
);
2703 /* Configure core debug access */
2704 if (retval
== ERROR_OK
)
2705 retval
= aarch64_init_debug_access(target
);
2711 * Cortex-A8 target creation and initialization
2714 static int aarch64_init_target(struct command_context
*cmd_ctx
,
2715 struct target
*target
)
2717 /* examine_first() does a bunch of this */
2718 arm_semihosting_init(target
);
2722 static int aarch64_init_arch_info(struct target
*target
,
2723 struct aarch64_common
*aarch64
, struct adiv5_dap
*dap
)
2725 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2727 /* Setup struct aarch64_common */
2728 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
2729 armv8
->arm
.dap
= dap
;
2731 /* register arch-specific functions */
2732 armv8
->examine_debug_reason
= NULL
;
2733 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
2734 armv8
->pre_restore_context
= NULL
;
2735 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
2737 armv8_init_arch_info(target
, armv8
);
2738 target_register_timer_callback(aarch64_handle_target_request
, 1,
2739 TARGET_TIMER_TYPE_PERIODIC
, target
);
2744 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
2746 struct aarch64_private_config
*pc
= target
->private_config
;
2747 struct aarch64_common
*aarch64
;
2749 if (adiv5_verify_config(&pc
->adiv5_config
) != ERROR_OK
)
2752 aarch64
= calloc(1, sizeof(struct aarch64_common
));
2753 if (aarch64
== NULL
) {
2754 LOG_ERROR("Out of memory");
2758 return aarch64_init_arch_info(target
, aarch64
, pc
->adiv5_config
.dap
);
2761 static void aarch64_deinit_target(struct target
*target
)
2763 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2764 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2765 struct arm_dpm
*dpm
= &armv8
->dpm
;
2767 armv8_free_reg_cache(target
);
2768 free(aarch64
->brp_list
);
2771 free(target
->private_config
);
2775 static int aarch64_mmu(struct target
*target
, int *enabled
)
2777 if (target
->state
!= TARGET_HALTED
) {
2778 LOG_ERROR("%s: target %s not halted", __func__
, target_name(target
));
2779 return ERROR_TARGET_INVALID
;
2782 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
2786 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
2787 target_addr_t
*phys
)
2789 return armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
2793 * private target configuration items
2795 enum aarch64_cfg_param
{
2799 static const Jim_Nvp nvp_config_opts
[] = {
2800 { .name
= "-cti", .value
= CFG_CTI
},
2801 { .name
= NULL
, .value
= -1 }
2804 static int aarch64_jim_configure(struct target
*target
, Jim_GetOptInfo
*goi
)
2806 struct aarch64_private_config
*pc
;
2810 pc
= (struct aarch64_private_config
*)target
->private_config
;
2812 pc
= calloc(1, sizeof(struct aarch64_private_config
));
2813 pc
->adiv5_config
.ap_num
= DP_APSEL_INVALID
;
2814 target
->private_config
= pc
;
2818 * Call adiv5_jim_configure() to parse the common DAP options
2819 * It will return JIM_CONTINUE if it didn't find any known
2820 * options, JIM_OK if it correctly parsed the topmost option
2821 * and JIM_ERR if an error occurred during parameter evaluation.
2822 * For JIM_CONTINUE, we check our own params.
2824 * adiv5_jim_configure() assumes 'private_config' to point to
2825 * 'struct adiv5_private_config'. Override 'private_config'!
2827 target
->private_config
= &pc
->adiv5_config
;
2828 e
= adiv5_jim_configure(target
, goi
);
2829 target
->private_config
= pc
;
2830 if (e
!= JIM_CONTINUE
)
2833 /* parse config or cget options ... */
2834 if (goi
->argc
> 0) {
2835 Jim_SetEmptyResult(goi
->interp
);
2837 /* check first if topmost item is for us */
2838 e
= Jim_Nvp_name2value_obj(goi
->interp
, nvp_config_opts
,
2841 return JIM_CONTINUE
;
2843 e
= Jim_GetOpt_Obj(goi
, NULL
);
2849 if (goi
->isconfigure
) {
2851 struct arm_cti
*cti
;
2852 e
= Jim_GetOpt_Obj(goi
, &o_cti
);
2855 cti
= cti_instance_by_jim_obj(goi
->interp
, o_cti
);
2857 Jim_SetResultString(goi
->interp
, "CTI name invalid!", -1);
2862 if (goi
->argc
!= 0) {
2863 Jim_WrongNumArgs(goi
->interp
,
2864 goi
->argc
, goi
->argv
,
2869 if (pc
== NULL
|| pc
->cti
== NULL
) {
2870 Jim_SetResultString(goi
->interp
, "CTI not configured", -1);
2873 Jim_SetResultString(goi
->interp
, arm_cti_name(pc
->cti
), -1);
2879 return JIM_CONTINUE
;
2886 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
2888 struct target
*target
= get_current_target(CMD_CTX
);
2889 struct armv8_common
*armv8
= target_to_armv8(target
);
2891 return armv8_handle_cache_info_command(CMD
,
2892 &armv8
->armv8_mmu
.armv8_cache
);
2895 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
2897 struct target
*target
= get_current_target(CMD_CTX
);
2898 if (!target_was_examined(target
)) {
2899 LOG_ERROR("target not examined yet");
2903 return aarch64_init_debug_access(target
);
2906 COMMAND_HANDLER(aarch64_handle_disassemble_command
)
2908 struct target
*target
= get_current_target(CMD_CTX
);
2910 if (target
== NULL
) {
2911 LOG_ERROR("No target selected");
2915 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2917 if (aarch64
->common_magic
!= AARCH64_COMMON_MAGIC
) {
2918 command_print(CMD
, "current target isn't an AArch64");
2923 target_addr_t address
;
2927 COMMAND_PARSE_NUMBER(int, CMD_ARGV
[1], count
);
2930 COMMAND_PARSE_ADDRESS(CMD_ARGV
[0], address
);
2933 return ERROR_COMMAND_SYNTAX_ERROR
;
2936 return a64_disassemble(CMD
, target
, address
, count
);
2939 COMMAND_HANDLER(aarch64_mask_interrupts_command
)
2941 struct target
*target
= get_current_target(CMD_CTX
);
2942 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2944 static const Jim_Nvp nvp_maskisr_modes
[] = {
2945 { .name
= "off", .value
= AARCH64_ISRMASK_OFF
},
2946 { .name
= "on", .value
= AARCH64_ISRMASK_ON
},
2947 { .name
= NULL
, .value
= -1 },
2952 n
= Jim_Nvp_name2value_simple(nvp_maskisr_modes
, CMD_ARGV
[0]);
2953 if (n
->name
== NULL
) {
2954 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV
[0]);
2955 return ERROR_COMMAND_SYNTAX_ERROR
;
2958 aarch64
->isrmasking_mode
= n
->value
;
2961 n
= Jim_Nvp_value2name_simple(nvp_maskisr_modes
, aarch64
->isrmasking_mode
);
2962 command_print(CMD
, "aarch64 interrupt mask %s", n
->name
);
2967 static int jim_mcrmrc(Jim_Interp
*interp
, int argc
, Jim_Obj
* const *argv
)
2969 struct command
*c
= jim_to_command(interp
);
2970 struct command_context
*context
;
2971 struct target
*target
;
2974 bool is_mcr
= false;
2977 if (!strcmp(c
->name
, "mcr")) {
2984 context
= current_command_context(interp
);
2985 assert(context
!= NULL
);
2987 target
= get_current_target(context
);
2988 if (target
== NULL
) {
2989 LOG_ERROR("%s: no current target", __func__
);
2992 if (!target_was_examined(target
)) {
2993 LOG_ERROR("%s: not yet examined", target_name(target
));
2997 arm
= target_to_arm(target
);
2999 LOG_ERROR("%s: not an ARM", target_name(target
));
3003 if (target
->state
!= TARGET_HALTED
)
3004 return ERROR_TARGET_NOT_HALTED
;
3006 if (arm
->core_state
== ARM_STATE_AARCH64
) {
3007 LOG_ERROR("%s: not 32-bit arm target", target_name(target
));
3011 if (argc
!= arg_cnt
) {
3012 LOG_ERROR("%s: wrong number of arguments", __func__
);
3024 /* NOTE: parameter sequence matches ARM instruction set usage:
3025 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
3026 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
3027 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
3029 retval
= Jim_GetLong(interp
, argv
[1], &l
);
3030 if (retval
!= JIM_OK
)
3033 LOG_ERROR("%s: %s %d out of range", __func__
,
3034 "coprocessor", (int) l
);
3039 retval
= Jim_GetLong(interp
, argv
[2], &l
);
3040 if (retval
!= JIM_OK
)
3043 LOG_ERROR("%s: %s %d out of range", __func__
,
3049 retval
= Jim_GetLong(interp
, argv
[3], &l
);
3050 if (retval
!= JIM_OK
)
3053 LOG_ERROR("%s: %s %d out of range", __func__
,
3059 retval
= Jim_GetLong(interp
, argv
[4], &l
);
3060 if (retval
!= JIM_OK
)
3063 LOG_ERROR("%s: %s %d out of range", __func__
,
3069 retval
= Jim_GetLong(interp
, argv
[5], &l
);
3070 if (retval
!= JIM_OK
)
3073 LOG_ERROR("%s: %s %d out of range", __func__
,
3081 if (is_mcr
== true) {
3082 retval
= Jim_GetLong(interp
, argv
[6], &l
);
3083 if (retval
!= JIM_OK
)
3087 /* NOTE: parameters reordered! */
3088 /* ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2) */
3089 retval
= arm
->mcr(target
, cpnum
, op1
, op2
, CRn
, CRm
, value
);
3090 if (retval
!= ERROR_OK
)
3093 /* NOTE: parameters reordered! */
3094 /* ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2) */
3095 retval
= arm
->mrc(target
, cpnum
, op1
, op2
, CRn
, CRm
, &value
);
3096 if (retval
!= ERROR_OK
)
3099 Jim_SetResult(interp
, Jim_NewIntObj(interp
, value
));
3105 static const struct command_registration aarch64_exec_command_handlers
[] = {
3107 .name
= "cache_info",
3108 .handler
= aarch64_handle_cache_info_command
,
3109 .mode
= COMMAND_EXEC
,
3110 .help
= "display information about target caches",
3115 .handler
= aarch64_handle_dbginit_command
,
3116 .mode
= COMMAND_EXEC
,
3117 .help
= "Initialize core debug",
3121 .name
= "disassemble",
3122 .handler
= aarch64_handle_disassemble_command
,
3123 .mode
= COMMAND_EXEC
,
3124 .help
= "Disassemble instructions",
3125 .usage
= "address [count]",
3129 .handler
= aarch64_mask_interrupts_command
,
3130 .mode
= COMMAND_ANY
,
3131 .help
= "mask aarch64 interrupts during single-step",
3132 .usage
= "['on'|'off']",
3136 .mode
= COMMAND_EXEC
,
3137 .jim_handler
= jim_mcrmrc
,
3138 .help
= "write coprocessor register",
3139 .usage
= "cpnum op1 CRn CRm op2 value",
3143 .mode
= COMMAND_EXEC
,
3144 .jim_handler
= jim_mcrmrc
,
3145 .help
= "read coprocessor register",
3146 .usage
= "cpnum op1 CRn CRm op2",
3149 .chain
= smp_command_handlers
,
3153 COMMAND_REGISTRATION_DONE
3156 extern const struct command_registration semihosting_common_handlers
[];
3158 static const struct command_registration aarch64_command_handlers
[] = {
3161 .mode
= COMMAND_ANY
,
3162 .help
= "ARM Command Group",
3164 .chain
= semihosting_common_handlers
3167 .chain
= armv8_command_handlers
,
3171 .mode
= COMMAND_ANY
,
3172 .help
= "Aarch64 command group",
3174 .chain
= aarch64_exec_command_handlers
,
3176 COMMAND_REGISTRATION_DONE
3179 struct target_type aarch64_target
= {
3182 .poll
= aarch64_poll
,
3183 .arch_state
= armv8_arch_state
,
3185 .halt
= aarch64_halt
,
3186 .resume
= aarch64_resume
,
3187 .step
= aarch64_step
,
3189 .assert_reset
= aarch64_assert_reset
,
3190 .deassert_reset
= aarch64_deassert_reset
,
3192 /* REVISIT allow exporting VFP3 registers ... */
3193 .get_gdb_arch
= armv8_get_gdb_arch
,
3194 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
3196 .read_memory
= aarch64_read_memory
,
3197 .write_memory
= aarch64_write_memory
,
3199 .add_breakpoint
= aarch64_add_breakpoint
,
3200 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
3201 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
3202 .remove_breakpoint
= aarch64_remove_breakpoint
,
3203 .add_watchpoint
= aarch64_add_watchpoint
,
3204 .remove_watchpoint
= aarch64_remove_watchpoint
,
3205 .hit_watchpoint
= aarch64_hit_watchpoint
,
3207 .commands
= aarch64_command_handlers
,
3208 .target_create
= aarch64_target_create
,
3209 .target_jim_configure
= aarch64_jim_configure
,
3210 .init_target
= aarch64_init_target
,
3211 .deinit_target
= aarch64_deinit_target
,
3212 .examine
= aarch64_examine
,
3214 .read_phys_memory
= aarch64_read_phys_memory
,
3215 .write_phys_memory
= aarch64_write_phys_memory
,
3217 .virt2phys
= aarch64_virt2phys
,
Linking to an existing account
If you already have an account and want to add another login method, you MUST first sign in with your existing account, and then change the URL to
https://review.openocd.org/login/?link
to return to this page; this time it will work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)