1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
43 static int aarch64_poll(struct target
*target
);
44 static int aarch64_debug_entry(struct target
*target
);
45 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
46 static int aarch64_set_breakpoint(struct target
*target
,
47 struct breakpoint
*breakpoint
, uint8_t matchmode
);
48 static int aarch64_set_context_breakpoint(struct target
*target
,
49 struct breakpoint
*breakpoint
, uint8_t matchmode
);
50 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
51 struct breakpoint
*breakpoint
);
52 static int aarch64_unset_breakpoint(struct target
*target
,
53 struct breakpoint
*breakpoint
);
54 static int aarch64_mmu(struct target
*target
, int *enabled
);
55 static int aarch64_virt2phys(struct target
*target
,
56 target_addr_t virt
, target_addr_t
*phys
);
57 static int aarch64_read_cpu_memory(struct target
*target
,
58 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
/* Iterate over every entry of a NULL-terminated singly linked
 * target_list; 'pos' is the cursor, 'head' the first element. */
#define foreach_smp_target(pos, head) \
	for (pos = head; (pos != NULL); pos = pos->next)
63 static int aarch64_restore_system_control_reg(struct target
*target
)
65 enum arm_mode target_mode
= ARM_MODE_ANY
;
66 int retval
= ERROR_OK
;
69 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
70 struct armv8_common
*armv8
= target_to_armv8(target
);
72 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
73 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
74 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
76 switch (armv8
->arm
.core_mode
) {
78 target_mode
= ARMV8_64_EL1H
;
82 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
86 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
90 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
97 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
101 LOG_INFO("cannot read system control register in this mode");
105 if (target_mode
!= ARM_MODE_ANY
)
106 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
108 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
, aarch64
->system_control_reg
);
109 if (retval
!= ERROR_OK
)
112 if (target_mode
!= ARM_MODE_ANY
)
113 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
119 /* modify system_control_reg in order to enable or disable mmu for :
120 * - virt2phys address conversion
121 * - read or write memory in phys or virt address */
122 static int aarch64_mmu_modify(struct target
*target
, int enable
)
124 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
125 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
126 int retval
= ERROR_OK
;
130 /* if mmu enabled at target stop and mmu not enable */
131 if (!(aarch64
->system_control_reg
& 0x1U
)) {
132 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
135 if (!(aarch64
->system_control_reg_curr
& 0x1U
))
136 aarch64
->system_control_reg_curr
|= 0x1U
;
138 if (aarch64
->system_control_reg_curr
& 0x4U
) {
139 /* data cache is active */
140 aarch64
->system_control_reg_curr
&= ~0x4U
;
141 /* flush data cache armv8 function to be called */
142 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
143 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
145 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
146 aarch64
->system_control_reg_curr
&= ~0x1U
;
150 switch (armv8
->arm
.core_mode
) {
154 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
158 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
162 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
169 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
173 LOG_DEBUG("unknown cpu state 0x%" PRIx32
, armv8
->arm
.core_mode
);
177 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
,
178 aarch64
->system_control_reg_curr
);
183 * Basic debug access, very low level assumes state is saved
185 static int aarch64_init_debug_access(struct target
*target
)
187 struct armv8_common
*armv8
= target_to_armv8(target
);
191 LOG_DEBUG("%s", target_name(target
));
193 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
194 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
195 if (retval
!= ERROR_OK
) {
196 LOG_DEBUG("Examine %s failed", "oslock");
200 /* Clear Sticky Power Down status Bit in PRSR to enable access to
201 the registers in the Core Power Domain */
202 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
203 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
204 if (retval
!= ERROR_OK
)
208 * Static CTI configuration:
209 * Channel 0 -> trigger outputs HALT request to PE
210 * Channel 1 -> trigger outputs Resume request to PE
211 * Gate all channel trigger events from entering the CTM
215 retval
= arm_cti_enable(armv8
->cti
, true);
216 /* By default, gate all channel events to and from the CTM */
217 if (retval
== ERROR_OK
)
218 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
219 /* output halt requests to PE on channel 0 event */
220 if (retval
== ERROR_OK
)
221 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN0
, CTI_CHNL(0));
222 /* output restart requests to PE on channel 1 event */
223 if (retval
== ERROR_OK
)
224 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN1
, CTI_CHNL(1));
225 if (retval
!= ERROR_OK
)
228 /* Resync breakpoint registers */
233 /* Write to memory mapped registers directly with no cache or mmu handling */
234 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
239 struct armv8_common
*armv8
= target_to_armv8(target
);
241 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
246 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint64_t debug
)
248 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
251 dpm
->arm
= &a8
->armv8_common
.arm
;
254 retval
= armv8_dpm_setup(dpm
);
255 if (retval
== ERROR_OK
)
256 retval
= armv8_dpm_initialize(dpm
);
261 static int aarch64_set_dscr_bits(struct target
*target
, unsigned long bit_mask
, unsigned long value
)
263 struct armv8_common
*armv8
= target_to_armv8(target
);
264 return armv8_set_dbgreg_bits(armv8
, CPUV8_DBG_DSCR
, bit_mask
, value
);
267 static int aarch64_check_state_one(struct target
*target
,
268 uint32_t mask
, uint32_t val
, int *p_result
, uint32_t *p_prsr
)
270 struct armv8_common
*armv8
= target_to_armv8(target
);
274 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
275 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &prsr
);
276 if (retval
!= ERROR_OK
)
283 *p_result
= (prsr
& mask
) == (val
& mask
);
288 static int aarch64_wait_halt_one(struct target
*target
)
290 int retval
= ERROR_OK
;
293 int64_t then
= timeval_ms();
297 retval
= aarch64_check_state_one(target
, PRSR_HALT
, PRSR_HALT
, &halted
, &prsr
);
298 if (retval
!= ERROR_OK
|| halted
)
301 if (timeval_ms() > then
+ 1000) {
302 retval
= ERROR_TARGET_TIMEOUT
;
303 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32
, target_name(target
), prsr
);
310 static int aarch64_prepare_halt_smp(struct target
*target
, bool exc_target
, struct target
**p_first
)
312 int retval
= ERROR_OK
;
313 struct target_list
*head
= target
->head
;
314 struct target
*first
= NULL
;
316 LOG_DEBUG("target %s exc %i", target_name(target
), exc_target
);
318 while (head
!= NULL
) {
319 struct target
*curr
= head
->target
;
320 struct armv8_common
*armv8
= target_to_armv8(curr
);
323 if (exc_target
&& curr
== target
)
325 if (!target_was_examined(curr
))
327 if (curr
->state
!= TARGET_RUNNING
)
330 /* HACK: mark this target as prepared for halting */
331 curr
->debug_reason
= DBG_REASON_DBGRQ
;
333 /* open the gate for channel 0 to let HALT requests pass to the CTM */
334 retval
= arm_cti_ungate_channel(armv8
->cti
, 0);
335 if (retval
== ERROR_OK
)
336 retval
= aarch64_set_dscr_bits(curr
, DSCR_HDE
, DSCR_HDE
);
337 if (retval
!= ERROR_OK
)
340 LOG_DEBUG("target %s prepared", target_name(curr
));
347 if (exc_target
&& first
)
356 static int aarch64_halt_one(struct target
*target
, enum halt_mode mode
)
358 int retval
= ERROR_OK
;
359 struct armv8_common
*armv8
= target_to_armv8(target
);
361 LOG_DEBUG("%s", target_name(target
));
363 /* allow Halting Debug Mode */
364 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
365 if (retval
!= ERROR_OK
)
368 /* trigger an event on channel 0, this outputs a halt request to the PE */
369 retval
= arm_cti_pulse_channel(armv8
->cti
, 0);
370 if (retval
!= ERROR_OK
)
373 if (mode
== HALT_SYNC
) {
374 retval
= aarch64_wait_halt_one(target
);
375 if (retval
!= ERROR_OK
) {
376 if (retval
== ERROR_TARGET_TIMEOUT
)
377 LOG_ERROR("Timeout waiting for target %s halt", target_name(target
));
385 static int aarch64_halt_smp(struct target
*target
, bool exc_target
)
387 struct target
*next
= target
;
390 /* prepare halt on all PEs of the group */
391 retval
= aarch64_prepare_halt_smp(target
, exc_target
, &next
);
393 if (exc_target
&& next
== target
)
396 /* halt the target PE */
397 if (retval
== ERROR_OK
)
398 retval
= aarch64_halt_one(next
, HALT_LAZY
);
400 if (retval
!= ERROR_OK
)
403 /* wait for all PEs to halt */
404 int64_t then
= timeval_ms();
406 bool all_halted
= true;
407 struct target_list
*head
;
410 foreach_smp_target(head
, target
->head
) {
415 if (!target_was_examined(curr
))
418 retval
= aarch64_check_state_one(curr
, PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
419 if (retval
!= ERROR_OK
|| !halted
) {
428 if (timeval_ms() > then
+ 1000) {
429 retval
= ERROR_TARGET_TIMEOUT
;
434 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
435 * and it looks like the CTI's are not connected by a common
436 * trigger matrix. It seems that we need to halt one core in each
437 * cluster explicitly. So if we find that a core has not halted
438 * yet, we trigger an explicit halt for the second cluster.
440 retval
= aarch64_halt_one(curr
, HALT_LAZY
);
441 if (retval
!= ERROR_OK
)
448 static int update_halt_gdb(struct target
*target
, enum target_debug_reason debug_reason
)
450 struct target
*gdb_target
= NULL
;
451 struct target_list
*head
;
454 if (debug_reason
== DBG_REASON_NOTHALTED
) {
455 LOG_INFO("Halting remaining targets in SMP group");
456 aarch64_halt_smp(target
, true);
459 /* poll all targets in the group, but skip the target that serves GDB */
460 foreach_smp_target(head
, target
->head
) {
462 /* skip calling context */
465 if (!target_was_examined(curr
))
467 /* skip targets that were already halted */
468 if (curr
->state
== TARGET_HALTED
)
470 /* remember the gdb_service->target */
471 if (curr
->gdb_service
!= NULL
)
472 gdb_target
= curr
->gdb_service
->target
;
474 if (curr
== gdb_target
)
477 /* avoid recursion in aarch64_poll() */
483 /* after all targets were updated, poll the gdb serving target */
484 if (gdb_target
!= NULL
&& gdb_target
!= target
)
485 aarch64_poll(gdb_target
);
491 * Aarch64 Run control
494 static int aarch64_poll(struct target
*target
)
496 enum target_state prev_target_state
;
497 int retval
= ERROR_OK
;
500 retval
= aarch64_check_state_one(target
,
501 PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
502 if (retval
!= ERROR_OK
)
506 prev_target_state
= target
->state
;
507 if (prev_target_state
!= TARGET_HALTED
) {
508 enum target_debug_reason debug_reason
= target
->debug_reason
;
510 /* We have a halting debug event */
511 target
->state
= TARGET_HALTED
;
512 LOG_DEBUG("Target %s halted", target_name(target
));
513 retval
= aarch64_debug_entry(target
);
514 if (retval
!= ERROR_OK
)
518 update_halt_gdb(target
, debug_reason
);
520 switch (prev_target_state
) {
524 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
526 case TARGET_DEBUG_RUNNING
:
527 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_HALTED
);
534 target
->state
= TARGET_RUNNING
;
539 static int aarch64_halt(struct target
*target
)
542 return aarch64_halt_smp(target
, false);
544 return aarch64_halt_one(target
, HALT_SYNC
);
547 static int aarch64_restore_one(struct target
*target
, int current
,
548 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
550 struct armv8_common
*armv8
= target_to_armv8(target
);
551 struct arm
*arm
= &armv8
->arm
;
555 LOG_DEBUG("%s", target_name(target
));
557 if (!debug_execution
)
558 target_free_all_working_areas(target
);
560 /* current = 1: continue on current pc, otherwise continue at <address> */
561 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
563 resume_pc
= *address
;
565 *address
= resume_pc
;
567 /* Make sure that the Armv7 gdb thumb fixups does not
568 * kill the return address
570 switch (arm
->core_state
) {
572 resume_pc
&= 0xFFFFFFFC;
574 case ARM_STATE_AARCH64
:
575 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
577 case ARM_STATE_THUMB
:
578 case ARM_STATE_THUMB_EE
:
579 /* When the return address is loaded into PC
580 * bit 0 must be 1 to stay in Thumb state
584 case ARM_STATE_JAZELLE
:
585 LOG_ERROR("How do I resume into Jazelle state??");
588 LOG_DEBUG("resume pc = 0x%016" PRIx64
, resume_pc
);
589 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
593 /* called it now before restoring context because it uses cpu
594 * register r0 for restoring system control register */
595 retval
= aarch64_restore_system_control_reg(target
);
596 if (retval
== ERROR_OK
)
597 retval
= aarch64_restore_context(target
, handle_breakpoints
);
603 * prepare single target for restart
607 static int aarch64_prepare_restart_one(struct target
*target
)
609 struct armv8_common
*armv8
= target_to_armv8(target
);
614 LOG_DEBUG("%s", target_name(target
));
616 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
617 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
618 if (retval
!= ERROR_OK
)
621 if ((dscr
& DSCR_ITE
) == 0)
622 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
623 if ((dscr
& DSCR_ERR
) != 0)
624 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
626 /* acknowledge a pending CTI halt event */
627 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
629 * open the CTI gate for channel 1 so that the restart events
630 * get passed along to all PEs. Also close gate for channel 0
631 * to isolate the PE from halt events.
633 if (retval
== ERROR_OK
)
634 retval
= arm_cti_ungate_channel(armv8
->cti
, 1);
635 if (retval
== ERROR_OK
)
636 retval
= arm_cti_gate_channel(armv8
->cti
, 0);
638 /* make sure that DSCR.HDE is set */
639 if (retval
== ERROR_OK
) {
641 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
642 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
645 if (retval
== ERROR_OK
) {
646 /* clear sticky bits in PRSR, SDR is now 0 */
647 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
648 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &tmp
);
654 static int aarch64_do_restart_one(struct target
*target
, enum restart_mode mode
)
656 struct armv8_common
*armv8
= target_to_armv8(target
);
659 LOG_DEBUG("%s", target_name(target
));
661 /* trigger an event on channel 1, generates a restart request to the PE */
662 retval
= arm_cti_pulse_channel(armv8
->cti
, 1);
663 if (retval
!= ERROR_OK
)
666 if (mode
== RESTART_SYNC
) {
667 int64_t then
= timeval_ms();
671 * if PRSR.SDR is set now, the target did restart, even
672 * if it's now already halted again (e.g. due to breakpoint)
674 retval
= aarch64_check_state_one(target
,
675 PRSR_SDR
, PRSR_SDR
, &resumed
, NULL
);
676 if (retval
!= ERROR_OK
|| resumed
)
679 if (timeval_ms() > then
+ 1000) {
680 LOG_ERROR("%s: Timeout waiting for resume"PRIx32
, target_name(target
));
681 retval
= ERROR_TARGET_TIMEOUT
;
687 if (retval
!= ERROR_OK
)
690 target
->debug_reason
= DBG_REASON_NOTHALTED
;
691 target
->state
= TARGET_RUNNING
;
696 static int aarch64_restart_one(struct target
*target
, enum restart_mode mode
)
700 LOG_DEBUG("%s", target_name(target
));
702 retval
= aarch64_prepare_restart_one(target
);
703 if (retval
== ERROR_OK
)
704 retval
= aarch64_do_restart_one(target
, mode
);
710 * prepare all but the current target for restart
712 static int aarch64_prep_restart_smp(struct target
*target
, int handle_breakpoints
, struct target
**p_first
)
714 int retval
= ERROR_OK
;
715 struct target_list
*head
;
716 struct target
*first
= NULL
;
719 foreach_smp_target(head
, target
->head
) {
720 struct target
*curr
= head
->target
;
722 /* skip calling target */
725 if (!target_was_examined(curr
))
727 if (curr
->state
!= TARGET_HALTED
)
730 /* resume at current address, not in step mode */
731 retval
= aarch64_restore_one(curr
, 1, &address
, handle_breakpoints
, 0);
732 if (retval
== ERROR_OK
)
733 retval
= aarch64_prepare_restart_one(curr
);
734 if (retval
!= ERROR_OK
) {
735 LOG_ERROR("failed to restore target %s", target_name(curr
));
738 /* remember the first valid target in the group */
750 static int aarch64_step_restart_smp(struct target
*target
)
752 int retval
= ERROR_OK
;
753 struct target_list
*head
;
754 struct target
*first
= NULL
;
756 LOG_DEBUG("%s", target_name(target
));
758 retval
= aarch64_prep_restart_smp(target
, 0, &first
);
759 if (retval
!= ERROR_OK
)
763 retval
= aarch64_do_restart_one(first
, RESTART_LAZY
);
764 if (retval
!= ERROR_OK
) {
765 LOG_DEBUG("error restarting target %s", target_name(first
));
769 int64_t then
= timeval_ms();
771 struct target
*curr
= target
;
772 bool all_resumed
= true;
774 foreach_smp_target(head
, target
->head
) {
783 if (!target_was_examined(curr
))
786 retval
= aarch64_check_state_one(curr
,
787 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
788 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
793 if (curr
->state
!= TARGET_RUNNING
) {
794 curr
->state
= TARGET_RUNNING
;
795 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
796 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
803 if (timeval_ms() > then
+ 1000) {
804 LOG_ERROR("%s: timeout waiting for target resume", __func__
);
805 retval
= ERROR_TARGET_TIMEOUT
;
809 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
810 * and it looks like the CTI's are not connected by a common
811 * trigger matrix. It seems that we need to halt one core in each
812 * cluster explicitly. So if we find that a core has not halted
813 * yet, we trigger an explicit resume for the second cluster.
815 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
816 if (retval
!= ERROR_OK
)
823 static int aarch64_resume(struct target
*target
, int current
,
824 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
827 uint64_t addr
= address
;
829 if (target
->state
!= TARGET_HALTED
)
830 return ERROR_TARGET_NOT_HALTED
;
833 * If this target is part of a SMP group, prepare the others
834 * targets for resuming. This involves restoring the complete
835 * target register context and setting up CTI gates to accept
836 * resume events from the trigger matrix.
839 retval
= aarch64_prep_restart_smp(target
, handle_breakpoints
, NULL
);
840 if (retval
!= ERROR_OK
)
844 /* all targets prepared, restore and restart the current target */
845 retval
= aarch64_restore_one(target
, current
, &addr
, handle_breakpoints
,
847 if (retval
== ERROR_OK
)
848 retval
= aarch64_restart_one(target
, RESTART_SYNC
);
849 if (retval
!= ERROR_OK
)
853 int64_t then
= timeval_ms();
855 struct target
*curr
= target
;
856 struct target_list
*head
;
857 bool all_resumed
= true;
859 foreach_smp_target(head
, target
->head
) {
866 if (!target_was_examined(curr
))
869 retval
= aarch64_check_state_one(curr
,
870 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
871 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
876 if (curr
->state
!= TARGET_RUNNING
) {
877 curr
->state
= TARGET_RUNNING
;
878 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
879 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
886 if (timeval_ms() > then
+ 1000) {
887 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__
, target_name(curr
));
888 retval
= ERROR_TARGET_TIMEOUT
;
893 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
894 * and it looks like the CTI's are not connected by a common
895 * trigger matrix. It seems that we need to halt one core in each
896 * cluster explicitly. So if we find that a core has not halted
897 * yet, we trigger an explicit resume for the second cluster.
899 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
900 if (retval
!= ERROR_OK
)
905 if (retval
!= ERROR_OK
)
908 target
->debug_reason
= DBG_REASON_NOTHALTED
;
910 if (!debug_execution
) {
911 target
->state
= TARGET_RUNNING
;
912 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
913 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
915 target
->state
= TARGET_DEBUG_RUNNING
;
916 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
917 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
923 static int aarch64_debug_entry(struct target
*target
)
925 int retval
= ERROR_OK
;
926 struct armv8_common
*armv8
= target_to_armv8(target
);
927 struct arm_dpm
*dpm
= &armv8
->dpm
;
928 enum arm_state core_state
;
931 /* make sure to clear all sticky errors */
932 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
933 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
934 if (retval
== ERROR_OK
)
935 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
936 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
937 if (retval
== ERROR_OK
)
938 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
940 if (retval
!= ERROR_OK
)
943 LOG_DEBUG("%s dscr = 0x%08" PRIx32
, target_name(target
), dscr
);
946 core_state
= armv8_dpm_get_core_state(dpm
);
947 armv8_select_opcodes(armv8
, core_state
== ARM_STATE_AARCH64
);
948 armv8_select_reg_access(armv8
, core_state
== ARM_STATE_AARCH64
);
950 /* close the CTI gate for all events */
951 if (retval
== ERROR_OK
)
952 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
953 /* discard async exceptions */
954 if (retval
== ERROR_OK
)
955 retval
= dpm
->instr_cpsr_sync(dpm
);
956 if (retval
!= ERROR_OK
)
959 /* Examine debug reason */
960 armv8_dpm_report_dscr(dpm
, dscr
);
962 /* save address of instruction that triggered the watchpoint? */
963 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
967 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
968 armv8
->debug_base
+ CPUV8_DBG_WFAR1
,
970 if (retval
!= ERROR_OK
)
974 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
975 armv8
->debug_base
+ CPUV8_DBG_WFAR0
,
977 if (retval
!= ERROR_OK
)
980 armv8_dpm_report_wfar(&armv8
->dpm
, wfar
);
983 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
985 if (retval
== ERROR_OK
&& armv8
->post_debug_entry
)
986 retval
= armv8
->post_debug_entry(target
);
991 static int aarch64_post_debug_entry(struct target
*target
)
993 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
994 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
996 enum arm_mode target_mode
= ARM_MODE_ANY
;
999 switch (armv8
->arm
.core_mode
) {
1001 target_mode
= ARMV8_64_EL1H
;
1005 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL1
, 0);
1009 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL2
, 0);
1013 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL3
, 0);
1020 instr
= ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1024 LOG_INFO("cannot read system control register in this mode");
1028 if (target_mode
!= ARM_MODE_ANY
)
1029 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
1031 retval
= armv8
->dpm
.instr_read_data_r0(&armv8
->dpm
, instr
, &aarch64
->system_control_reg
);
1032 if (retval
!= ERROR_OK
)
1035 if (target_mode
!= ARM_MODE_ANY
)
1036 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
1038 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
1039 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
1041 if (armv8
->armv8_mmu
.armv8_cache
.info
== -1) {
1042 armv8_identify_cache(armv8
);
1043 armv8_read_mpidr(armv8
);
1046 armv8
->armv8_mmu
.mmu_enabled
=
1047 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
1048 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
1049 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
1050 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
1051 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
1056 * single-step a target
1058 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
1059 int handle_breakpoints
)
1061 struct armv8_common
*armv8
= target_to_armv8(target
);
1062 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1063 int saved_retval
= ERROR_OK
;
1067 if (target
->state
!= TARGET_HALTED
) {
1068 LOG_WARNING("target not halted");
1069 return ERROR_TARGET_NOT_HALTED
;
1072 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1073 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
1074 /* make sure EDECR.SS is not set when restoring the register */
1076 if (retval
== ERROR_OK
) {
1078 /* set EDECR.SS to enter hardware step mode */
1079 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1080 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (edecr
|0x4));
1082 /* disable interrupts while stepping */
1083 if (retval
== ERROR_OK
&& aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
)
1084 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0x3 << 22);
1085 /* bail out if stepping setup has failed */
1086 if (retval
!= ERROR_OK
)
1089 if (target
->smp
&& !handle_breakpoints
) {
1091 * isolate current target so that it doesn't get resumed
1092 * together with the others
1094 retval
= arm_cti_gate_channel(armv8
->cti
, 1);
1095 /* resume all other targets in the group */
1096 if (retval
== ERROR_OK
)
1097 retval
= aarch64_step_restart_smp(target
);
1098 if (retval
!= ERROR_OK
) {
1099 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1102 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1105 /* all other targets running, restore and restart the current target */
1106 retval
= aarch64_restore_one(target
, current
, &address
, 0, 0);
1107 if (retval
== ERROR_OK
)
1108 retval
= aarch64_restart_one(target
, RESTART_LAZY
);
1110 if (retval
!= ERROR_OK
)
1113 LOG_DEBUG("target step-resumed at 0x%" PRIx64
, address
);
1114 if (!handle_breakpoints
)
1115 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1117 int64_t then
= timeval_ms();
1122 retval
= aarch64_check_state_one(target
,
1123 PRSR_SDR
|PRSR_HALT
, PRSR_SDR
|PRSR_HALT
, &stepped
, &prsr
);
1124 if (retval
!= ERROR_OK
|| stepped
)
1127 if (timeval_ms() > then
+ 100) {
1128 LOG_ERROR("timeout waiting for target %s halt after step",
1129 target_name(target
));
1130 retval
= ERROR_TARGET_TIMEOUT
;
1136 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1137 * causes a timeout. The core takes the step but doesn't complete it and so
1138 * debug state is never entered. However, you can manually halt the core
1139 * as an external debug even is also a WFI wakeup event.
1141 if (retval
== ERROR_TARGET_TIMEOUT
)
1142 saved_retval
= aarch64_halt_one(target
, HALT_SYNC
);
1145 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1146 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
1147 if (retval
!= ERROR_OK
)
1150 /* restore interrupts */
1151 if (aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
) {
1152 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0);
1153 if (retval
!= ERROR_OK
)
1157 if (saved_retval
!= ERROR_OK
)
1158 return saved_retval
;
1160 return aarch64_poll(target
);
1163 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
1165 struct armv8_common
*armv8
= target_to_armv8(target
);
1166 struct arm
*arm
= &armv8
->arm
;
1170 LOG_DEBUG("%s", target_name(target
));
1172 if (armv8
->pre_restore_context
)
1173 armv8
->pre_restore_context(target
);
1175 retval
= armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
1176 if (retval
== ERROR_OK
) {
1177 /* registers are now invalid */
1178 register_cache_invalidate(arm
->core_cache
);
1179 register_cache_invalidate(arm
->core_cache
->next
);
1186 * Cortex-A8 Breakpoint and watchpoint functions
1189 /* Setup hardware Breakpoint Register Pair */
1190 static int aarch64_set_breakpoint(struct target
*target
,
1191 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1196 uint8_t byte_addr_select
= 0x0F;
1197 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1198 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1199 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1201 if (breakpoint
->set
) {
1202 LOG_WARNING("breakpoint already set");
1206 if (breakpoint
->type
== BKPT_HARD
) {
1208 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
1210 if (brp_i
>= aarch64
->brp_num
) {
1211 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1212 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1214 breakpoint
->set
= brp_i
+ 1;
1215 if (breakpoint
->length
== 2)
1216 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1217 control
= ((matchmode
& 0x7) << 20)
1219 | (byte_addr_select
<< 5)
1221 brp_list
[brp_i
].used
= 1;
1222 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1223 brp_list
[brp_i
].control
= control
;
1224 bpt_value
= brp_list
[brp_i
].value
;
1226 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1227 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1228 (uint32_t)(bpt_value
& 0xFFFFFFFF));
1229 if (retval
!= ERROR_OK
)
1231 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1232 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1233 (uint32_t)(bpt_value
>> 32));
1234 if (retval
!= ERROR_OK
)
1237 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1238 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1239 brp_list
[brp_i
].control
);
1240 if (retval
!= ERROR_OK
)
1242 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1243 brp_list
[brp_i
].control
,
1244 brp_list
[brp_i
].value
);
1246 } else if (breakpoint
->type
== BKPT_SOFT
) {
1249 buf_set_u32(code
, 0, 32, armv8_opcode(armv8
, ARMV8_OPC_HLT
));
1250 retval
= target_read_memory(target
,
1251 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1252 breakpoint
->length
, 1,
1253 breakpoint
->orig_instr
);
1254 if (retval
!= ERROR_OK
)
1257 armv8_cache_d_inner_flush_virt(armv8
,
1258 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1259 breakpoint
->length
);
1261 retval
= target_write_memory(target
,
1262 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1263 breakpoint
->length
, 1, code
);
1264 if (retval
!= ERROR_OK
)
1267 armv8_cache_d_inner_flush_virt(armv8
,
1268 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1269 breakpoint
->length
);
1271 armv8_cache_i_inner_inval_virt(armv8
,
1272 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1273 breakpoint
->length
);
1275 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1278 /* Ensure that halting debug mode is enable */
1279 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
1280 if (retval
!= ERROR_OK
) {
1281 LOG_DEBUG("Failed to set DSCR.HDE");
1288 static int aarch64_set_context_breakpoint(struct target
*target
,
1289 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1291 int retval
= ERROR_FAIL
;
1294 uint8_t byte_addr_select
= 0x0F;
1295 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1296 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1297 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1299 if (breakpoint
->set
) {
1300 LOG_WARNING("breakpoint already set");
1303 /*check available context BRPs*/
1304 while ((brp_list
[brp_i
].used
||
1305 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
1308 if (brp_i
>= aarch64
->brp_num
) {
1309 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1313 breakpoint
->set
= brp_i
+ 1;
1314 control
= ((matchmode
& 0x7) << 20)
1316 | (byte_addr_select
<< 5)
1318 brp_list
[brp_i
].used
= 1;
1319 brp_list
[brp_i
].value
= (breakpoint
->asid
);
1320 brp_list
[brp_i
].control
= control
;
1321 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1322 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1323 brp_list
[brp_i
].value
);
1324 if (retval
!= ERROR_OK
)
1326 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1327 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1328 brp_list
[brp_i
].control
);
1329 if (retval
!= ERROR_OK
)
1331 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1332 brp_list
[brp_i
].control
,
1333 brp_list
[brp_i
].value
);
1338 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1340 int retval
= ERROR_FAIL
;
1341 int brp_1
= 0; /* holds the contextID pair */
1342 int brp_2
= 0; /* holds the IVA pair */
1343 uint32_t control_CTX
, control_IVA
;
1344 uint8_t CTX_byte_addr_select
= 0x0F;
1345 uint8_t IVA_byte_addr_select
= 0x0F;
1346 uint8_t CTX_machmode
= 0x03;
1347 uint8_t IVA_machmode
= 0x01;
1348 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1349 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1350 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1352 if (breakpoint
->set
) {
1353 LOG_WARNING("breakpoint already set");
1356 /*check available context BRPs*/
1357 while ((brp_list
[brp_1
].used
||
1358 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
1361 printf("brp(CTX) found num: %d\n", brp_1
);
1362 if (brp_1
>= aarch64
->brp_num
) {
1363 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1367 while ((brp_list
[brp_2
].used
||
1368 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
1371 printf("brp(IVA) found num: %d\n", brp_2
);
1372 if (brp_2
>= aarch64
->brp_num
) {
1373 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1377 breakpoint
->set
= brp_1
+ 1;
1378 breakpoint
->linked_BRP
= brp_2
;
1379 control_CTX
= ((CTX_machmode
& 0x7) << 20)
1382 | (CTX_byte_addr_select
<< 5)
1384 brp_list
[brp_1
].used
= 1;
1385 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1386 brp_list
[brp_1
].control
= control_CTX
;
1387 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1388 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1389 brp_list
[brp_1
].value
);
1390 if (retval
!= ERROR_OK
)
1392 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1393 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1394 brp_list
[brp_1
].control
);
1395 if (retval
!= ERROR_OK
)
1398 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1401 | (IVA_byte_addr_select
<< 5)
1403 brp_list
[brp_2
].used
= 1;
1404 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1405 brp_list
[brp_2
].control
= control_IVA
;
1406 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1407 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1408 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1409 if (retval
!= ERROR_OK
)
1411 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1412 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].BRPn
,
1413 brp_list
[brp_2
].value
>> 32);
1414 if (retval
!= ERROR_OK
)
1416 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1417 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1418 brp_list
[brp_2
].control
);
1419 if (retval
!= ERROR_OK
)
1425 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1428 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1429 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1430 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1432 if (!breakpoint
->set
) {
1433 LOG_WARNING("breakpoint not set");
1437 if (breakpoint
->type
== BKPT_HARD
) {
1438 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1439 int brp_i
= breakpoint
->set
- 1;
1440 int brp_j
= breakpoint
->linked_BRP
;
1441 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1442 LOG_DEBUG("Invalid BRP number in breakpoint");
1445 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1446 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1447 brp_list
[brp_i
].used
= 0;
1448 brp_list
[brp_i
].value
= 0;
1449 brp_list
[brp_i
].control
= 0;
1450 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1451 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1452 brp_list
[brp_i
].control
);
1453 if (retval
!= ERROR_OK
)
1455 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1456 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1457 (uint32_t)brp_list
[brp_i
].value
);
1458 if (retval
!= ERROR_OK
)
1460 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1461 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1462 (uint32_t)brp_list
[brp_i
].value
);
1463 if (retval
!= ERROR_OK
)
1465 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1466 LOG_DEBUG("Invalid BRP number in breakpoint");
1469 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1470 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1471 brp_list
[brp_j
].used
= 0;
1472 brp_list
[brp_j
].value
= 0;
1473 brp_list
[brp_j
].control
= 0;
1474 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1475 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1476 brp_list
[brp_j
].control
);
1477 if (retval
!= ERROR_OK
)
1479 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1480 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1481 (uint32_t)brp_list
[brp_j
].value
);
1482 if (retval
!= ERROR_OK
)
1484 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1485 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_j
].BRPn
,
1486 (uint32_t)brp_list
[brp_j
].value
);
1487 if (retval
!= ERROR_OK
)
1490 breakpoint
->linked_BRP
= 0;
1491 breakpoint
->set
= 0;
1495 int brp_i
= breakpoint
->set
- 1;
1496 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1497 LOG_DEBUG("Invalid BRP number in breakpoint");
1500 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1501 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1502 brp_list
[brp_i
].used
= 0;
1503 brp_list
[brp_i
].value
= 0;
1504 brp_list
[brp_i
].control
= 0;
1505 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1506 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1507 brp_list
[brp_i
].control
);
1508 if (retval
!= ERROR_OK
)
1510 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1511 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1512 brp_list
[brp_i
].value
);
1513 if (retval
!= ERROR_OK
)
1516 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1517 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1518 (uint32_t)brp_list
[brp_i
].value
);
1519 if (retval
!= ERROR_OK
)
1521 breakpoint
->set
= 0;
1525 /* restore original instruction (kept in target endianness) */
1527 armv8_cache_d_inner_flush_virt(armv8
,
1528 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1529 breakpoint
->length
);
1531 if (breakpoint
->length
== 4) {
1532 retval
= target_write_memory(target
,
1533 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1534 4, 1, breakpoint
->orig_instr
);
1535 if (retval
!= ERROR_OK
)
1538 retval
= target_write_memory(target
,
1539 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1540 2, 1, breakpoint
->orig_instr
);
1541 if (retval
!= ERROR_OK
)
1545 armv8_cache_d_inner_flush_virt(armv8
,
1546 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1547 breakpoint
->length
);
1549 armv8_cache_i_inner_inval_virt(armv8
,
1550 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1551 breakpoint
->length
);
1553 breakpoint
->set
= 0;
1558 static int aarch64_add_breakpoint(struct target
*target
,
1559 struct breakpoint
*breakpoint
)
1561 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1563 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1564 LOG_INFO("no hardware breakpoint available");
1565 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1568 if (breakpoint
->type
== BKPT_HARD
)
1569 aarch64
->brp_num_available
--;
1571 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1574 static int aarch64_add_context_breakpoint(struct target
*target
,
1575 struct breakpoint
*breakpoint
)
1577 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1579 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1580 LOG_INFO("no hardware breakpoint available");
1581 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1584 if (breakpoint
->type
== BKPT_HARD
)
1585 aarch64
->brp_num_available
--;
1587 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1590 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1591 struct breakpoint
*breakpoint
)
1593 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1595 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1596 LOG_INFO("no hardware breakpoint available");
1597 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1600 if (breakpoint
->type
== BKPT_HARD
)
1601 aarch64
->brp_num_available
--;
1603 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1607 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1609 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1612 /* It is perfectly possible to remove breakpoints while the target is running */
1613 if (target
->state
!= TARGET_HALTED
) {
1614 LOG_WARNING("target not halted");
1615 return ERROR_TARGET_NOT_HALTED
;
1619 if (breakpoint
->set
) {
1620 aarch64_unset_breakpoint(target
, breakpoint
);
1621 if (breakpoint
->type
== BKPT_HARD
)
1622 aarch64
->brp_num_available
++;
1629 * Cortex-A8 Reset functions
1632 static int aarch64_assert_reset(struct target
*target
)
1634 struct armv8_common
*armv8
= target_to_armv8(target
);
1638 /* FIXME when halt is requested, make it work somehow... */
1640 /* Issue some kind of warm reset. */
1641 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1642 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1643 else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1644 /* REVISIT handle "pulls" cases, if there's
1645 * hardware that needs them to work.
1647 jtag_add_reset(0, 1);
1649 LOG_ERROR("%s: how to reset?", target_name(target
));
1653 /* registers are now invalid */
1654 if (target_was_examined(target
)) {
1655 register_cache_invalidate(armv8
->arm
.core_cache
);
1656 register_cache_invalidate(armv8
->arm
.core_cache
->next
);
1659 target
->state
= TARGET_RESET
;
1664 static int aarch64_deassert_reset(struct target
*target
)
1670 /* be certain SRST is off */
1671 jtag_add_reset(0, 0);
1673 if (!target_was_examined(target
))
1676 retval
= aarch64_poll(target
);
1677 if (retval
!= ERROR_OK
)
1680 if (target
->reset_halt
) {
1681 if (target
->state
!= TARGET_HALTED
) {
1682 LOG_WARNING("%s: ran after reset and before halt ...",
1683 target_name(target
));
1684 retval
= target_halt(target
);
1685 if (retval
!= ERROR_OK
)
1690 return aarch64_init_debug_access(target
);
1693 static int aarch64_write_cpu_memory_slow(struct target
*target
,
1694 uint32_t size
, uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
1696 struct armv8_common
*armv8
= target_to_armv8(target
);
1697 struct arm_dpm
*dpm
= &armv8
->dpm
;
1698 struct arm
*arm
= &armv8
->arm
;
1701 armv8_reg_current(arm
, 1)->dirty
= true;
1703 /* change DCC to normal mode if necessary */
1704 if (*dscr
& DSCR_MA
) {
1706 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1707 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1708 if (retval
!= ERROR_OK
)
1713 uint32_t data
, opcode
;
1715 /* write the data to store into DTRRX */
1719 data
= target_buffer_get_u16(target
, buffer
);
1721 data
= target_buffer_get_u32(target
, buffer
);
1722 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1723 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
1724 if (retval
!= ERROR_OK
)
1727 if (arm
->core_state
== ARM_STATE_AARCH64
)
1728 retval
= dpm
->instr_execute(dpm
, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0
, 1));
1730 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
1731 if (retval
!= ERROR_OK
)
1735 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRB_IP
);
1737 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRH_IP
);
1739 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRW_IP
);
1740 retval
= dpm
->instr_execute(dpm
, opcode
);
1741 if (retval
!= ERROR_OK
)
1752 static int aarch64_write_cpu_memory_fast(struct target
*target
,
1753 uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
1755 struct armv8_common
*armv8
= target_to_armv8(target
);
1756 struct arm
*arm
= &armv8
->arm
;
1759 armv8_reg_current(arm
, 1)->dirty
= true;
1761 /* Step 1.d - Change DCC to memory mode */
1763 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1764 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1765 if (retval
!= ERROR_OK
)
1769 /* Step 2.a - Do the write */
1770 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
1771 buffer
, 4, count
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
1772 if (retval
!= ERROR_OK
)
1775 /* Step 3.a - Switch DTR mode back to Normal mode */
1777 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1778 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1779 if (retval
!= ERROR_OK
)
1785 static int aarch64_write_cpu_memory(struct target
*target
,
1786 uint64_t address
, uint32_t size
,
1787 uint32_t count
, const uint8_t *buffer
)
1789 /* write memory through APB-AP */
1790 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1791 struct armv8_common
*armv8
= target_to_armv8(target
);
1792 struct arm_dpm
*dpm
= &armv8
->dpm
;
1793 struct arm
*arm
= &armv8
->arm
;
1796 if (target
->state
!= TARGET_HALTED
) {
1797 LOG_WARNING("target not halted");
1798 return ERROR_TARGET_NOT_HALTED
;
1801 /* Mark register X0 as dirty, as it will be used
1802 * for transferring the data.
1803 * It will be restored automatically when exiting
1806 armv8_reg_current(arm
, 0)->dirty
= true;
1808 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1811 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1812 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1813 if (retval
!= ERROR_OK
)
1816 /* Set Normal access mode */
1817 dscr
= (dscr
& ~DSCR_MA
);
1818 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1819 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1820 if (retval
!= ERROR_OK
)
1823 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1824 /* Write X0 with value 'address' using write procedure */
1825 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1826 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1827 retval
= dpm
->instr_write_data_dcc_64(dpm
,
1828 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
1830 /* Write R0 with value 'address' using write procedure */
1831 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1832 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1833 retval
= dpm
->instr_write_data_dcc(dpm
,
1834 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
1837 if (retval
!= ERROR_OK
)
1840 if (size
== 4 && (address
% 4) == 0)
1841 retval
= aarch64_write_cpu_memory_fast(target
, count
, buffer
, &dscr
);
1843 retval
= aarch64_write_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
1845 if (retval
!= ERROR_OK
) {
1846 /* Unset DTR mode */
1847 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1848 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1850 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1851 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1854 /* Check for sticky abort flags in the DSCR */
1855 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1856 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1857 if (retval
!= ERROR_OK
)
1861 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1862 /* Abort occurred - clear it and exit */
1863 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1864 armv8_dpm_handle_exception(dpm
);
1872 static int aarch64_read_cpu_memory_slow(struct target
*target
,
1873 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
1875 struct armv8_common
*armv8
= target_to_armv8(target
);
1876 struct arm_dpm
*dpm
= &armv8
->dpm
;
1877 struct arm
*arm
= &armv8
->arm
;
1880 armv8_reg_current(arm
, 1)->dirty
= true;
1882 /* change DCC to normal mode (if necessary) */
1883 if (*dscr
& DSCR_MA
) {
1885 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1886 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1887 if (retval
!= ERROR_OK
)
1892 uint32_t opcode
, data
;
1895 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRB_IP
);
1897 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRH_IP
);
1899 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRW_IP
);
1900 retval
= dpm
->instr_execute(dpm
, opcode
);
1901 if (retval
!= ERROR_OK
)
1904 if (arm
->core_state
== ARM_STATE_AARCH64
)
1905 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0
, 1));
1907 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1908 if (retval
!= ERROR_OK
)
1911 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1912 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &data
);
1913 if (retval
!= ERROR_OK
)
1917 *buffer
= (uint8_t)data
;
1919 target_buffer_set_u16(target
, buffer
, (uint16_t)data
);
1921 target_buffer_set_u32(target
, buffer
, data
);
1931 static int aarch64_read_cpu_memory_fast(struct target
*target
,
1932 uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
1934 struct armv8_common
*armv8
= target_to_armv8(target
);
1935 struct arm_dpm
*dpm
= &armv8
->dpm
;
1936 struct arm
*arm
= &armv8
->arm
;
1940 /* Mark X1 as dirty */
1941 armv8_reg_current(arm
, 1)->dirty
= true;
1943 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1944 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1945 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0));
1947 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1948 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1951 if (retval
!= ERROR_OK
)
1954 /* Step 1.e - Change DCC to memory mode */
1956 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1957 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1958 if (retval
!= ERROR_OK
)
1961 /* Step 1.f - read DBGDTRTX and discard the value */
1962 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1963 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1964 if (retval
!= ERROR_OK
)
1968 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1969 * Abort flags are sticky, so can be read at end of transactions
1971 * This data is read in aligned to 32 bit boundary.
1975 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1976 * increments X0 by 4. */
1977 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, buffer
, 4, count
,
1978 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
1979 if (retval
!= ERROR_OK
)
1983 /* Step 3.a - set DTR access mode back to Normal mode */
1985 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1986 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1987 if (retval
!= ERROR_OK
)
1990 /* Step 3.b - read DBGDTRTX for the final value */
1991 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1992 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1993 if (retval
!= ERROR_OK
)
1996 target_buffer_set_u32(target
, buffer
+ count
* 4, value
);
2000 static int aarch64_read_cpu_memory(struct target
*target
,
2001 target_addr_t address
, uint32_t size
,
2002 uint32_t count
, uint8_t *buffer
)
2004 /* read memory through APB-AP */
2005 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2006 struct armv8_common
*armv8
= target_to_armv8(target
);
2007 struct arm_dpm
*dpm
= &armv8
->dpm
;
2008 struct arm
*arm
= &armv8
->arm
;
2011 LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64
" size %" PRIu32
" count %" PRIu32
,
2012 address
, size
, count
);
2014 if (target
->state
!= TARGET_HALTED
) {
2015 LOG_WARNING("target not halted");
2016 return ERROR_TARGET_NOT_HALTED
;
2019 /* Mark register X0 as dirty, as it will be used
2020 * for transferring the data.
2021 * It will be restored automatically when exiting
2024 armv8_reg_current(arm
, 0)->dirty
= true;
2027 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2028 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2029 if (retval
!= ERROR_OK
)
2032 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2034 /* Set Normal access mode */
2036 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2037 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2038 if (retval
!= ERROR_OK
)
2041 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2042 /* Write X0 with value 'address' using write procedure */
2043 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2044 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2045 retval
= dpm
->instr_write_data_dcc_64(dpm
,
2046 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
2048 /* Write R0 with value 'address' using write procedure */
2049 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2050 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2051 retval
= dpm
->instr_write_data_dcc(dpm
,
2052 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
2055 if (retval
!= ERROR_OK
)
2058 if (size
== 4 && (address
% 4) == 0)
2059 retval
= aarch64_read_cpu_memory_fast(target
, count
, buffer
, &dscr
);
2061 retval
= aarch64_read_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
2063 if (dscr
& DSCR_MA
) {
2065 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2066 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2069 if (retval
!= ERROR_OK
)
2072 /* Check for sticky abort flags in the DSCR */
2073 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2074 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2075 if (retval
!= ERROR_OK
)
2080 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
2081 /* Abort occurred - clear it and exit */
2082 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
2083 armv8_dpm_handle_exception(dpm
);
2091 static int aarch64_read_phys_memory(struct target
*target
,
2092 target_addr_t address
, uint32_t size
,
2093 uint32_t count
, uint8_t *buffer
)
2095 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2097 if (count
&& buffer
) {
2098 /* read memory through APB-AP */
2099 retval
= aarch64_mmu_modify(target
, 0);
2100 if (retval
!= ERROR_OK
)
2102 retval
= aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2107 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
2108 uint32_t size
, uint32_t count
, uint8_t *buffer
)
2110 int mmu_enabled
= 0;
2113 /* determine if MMU was enabled on target stop */
2114 retval
= aarch64_mmu(target
, &mmu_enabled
);
2115 if (retval
!= ERROR_OK
)
2119 /* enable MMU as we could have disabled it for phys access */
2120 retval
= aarch64_mmu_modify(target
, 1);
2121 if (retval
!= ERROR_OK
)
2124 return aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2127 static int aarch64_write_phys_memory(struct target
*target
,
2128 target_addr_t address
, uint32_t size
,
2129 uint32_t count
, const uint8_t *buffer
)
2131 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2133 if (count
&& buffer
) {
2134 /* write memory through APB-AP */
2135 retval
= aarch64_mmu_modify(target
, 0);
2136 if (retval
!= ERROR_OK
)
2138 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2144 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
2145 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
2147 int mmu_enabled
= 0;
2150 /* determine if MMU was enabled on target stop */
2151 retval
= aarch64_mmu(target
, &mmu_enabled
);
2152 if (retval
!= ERROR_OK
)
2156 /* enable MMU as we could have disabled it for phys access */
2157 retval
= aarch64_mmu_modify(target
, 1);
2158 if (retval
!= ERROR_OK
)
2161 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2164 static int aarch64_handle_target_request(void *priv
)
2166 struct target
*target
= priv
;
2167 struct armv8_common
*armv8
= target_to_armv8(target
);
2170 if (!target_was_examined(target
))
2172 if (!target
->dbg_msg_enabled
)
2175 if (target
->state
== TARGET_RUNNING
) {
2178 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2179 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2181 /* check if we have data */
2182 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
2183 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2184 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
2185 if (retval
== ERROR_OK
) {
2186 target_request(target
, request
);
2187 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2188 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2196 static int aarch64_examine_first(struct target
*target
)
2198 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2199 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2200 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2203 int retval
= ERROR_OK
;
2204 uint64_t debug
, ttypr
;
2206 uint32_t tmp0
, tmp1
, tmp2
, tmp3
;
2207 debug
= ttypr
= cpuid
= 0;
2209 retval
= dap_dp_init(swjdp
);
2210 if (retval
!= ERROR_OK
)
2213 /* Search for the APB-AB - it is needed for access to debug registers */
2214 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
2215 if (retval
!= ERROR_OK
) {
2216 LOG_ERROR("Could not find APB-AP for debug access");
2220 retval
= mem_ap_init(armv8
->debug_ap
);
2221 if (retval
!= ERROR_OK
) {
2222 LOG_ERROR("Could not initialize the APB-AP");
2226 armv8
->debug_ap
->memaccess_tck
= 10;
2228 if (!target
->dbgbase_set
) {
2230 /* Get ROM Table base */
2232 int32_t coreidx
= target
->coreid
;
2233 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
2234 if (retval
!= ERROR_OK
)
2236 /* Lookup 0x15 -- Processor DAP */
2237 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
2238 &armv8
->debug_base
, &coreidx
);
2239 if (retval
!= ERROR_OK
)
2241 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
2242 " apid: %08" PRIx32
, coreidx
, armv8
->debug_base
, apid
);
2244 armv8
->debug_base
= target
->dbgbase
;
2246 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2247 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
2248 if (retval
!= ERROR_OK
) {
2249 LOG_DEBUG("Examine %s failed", "oslock");
2253 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2254 armv8
->debug_base
+ CPUV8_DBG_MAINID0
, &cpuid
);
2255 if (retval
!= ERROR_OK
) {
2256 LOG_DEBUG("Examine %s failed", "CPUID");
2260 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2261 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
, &tmp0
);
2262 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2263 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
+ 4, &tmp1
);
2264 if (retval
!= ERROR_OK
) {
2265 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2268 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2269 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
, &tmp2
);
2270 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2271 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
+ 4, &tmp3
);
2272 if (retval
!= ERROR_OK
) {
2273 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2277 retval
= dap_run(armv8
->debug_ap
->dap
);
2278 if (retval
!= ERROR_OK
) {
2279 LOG_ERROR("%s: examination failed\n", target_name(target
));
2284 ttypr
= (ttypr
<< 32) | tmp0
;
2286 debug
= (debug
<< 32) | tmp2
;
2288 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
2289 LOG_DEBUG("ttypr = 0x%08" PRIx64
, ttypr
);
2290 LOG_DEBUG("debug = 0x%08" PRIx64
, debug
);
2292 if (target
->ctibase
== 0) {
2293 /* assume a v8 rom table layout */
2294 cti_base
= armv8
->debug_base
+ 0x10000;
2295 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32
, cti_base
);
2297 cti_base
= target
->ctibase
;
2299 armv8
->cti
= arm_cti_create(armv8
->debug_ap
, cti_base
);
2300 if (armv8
->cti
== NULL
)
2303 retval
= aarch64_dpm_setup(aarch64
, debug
);
2304 if (retval
!= ERROR_OK
)
2307 /* Setup Breakpoint Register Pairs */
2308 aarch64
->brp_num
= (uint32_t)((debug
>> 12) & 0x0F) + 1;
2309 aarch64
->brp_num_context
= (uint32_t)((debug
>> 28) & 0x0F) + 1;
2310 aarch64
->brp_num_available
= aarch64
->brp_num
;
2311 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
2312 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
2313 aarch64
->brp_list
[i
].used
= 0;
2314 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
2315 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
2317 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
2318 aarch64
->brp_list
[i
].value
= 0;
2319 aarch64
->brp_list
[i
].control
= 0;
2320 aarch64
->brp_list
[i
].BRPn
= i
;
2323 LOG_DEBUG("Configured %i hw breakpoints", aarch64
->brp_num
);
2325 target
->state
= TARGET_UNKNOWN
;
2326 target
->debug_reason
= DBG_REASON_NOTHALTED
;
2327 aarch64
->isrmasking_mode
= AARCH64_ISRMASK_ON
;
2328 target_set_examined(target
);
2332 static int aarch64_examine(struct target
*target
)
2334 int retval
= ERROR_OK
;
2336 /* don't re-probe hardware after each reset */
2337 if (!target_was_examined(target
))
2338 retval
= aarch64_examine_first(target
);
2340 /* Configure core debug access */
2341 if (retval
== ERROR_OK
)
2342 retval
= aarch64_init_debug_access(target
);
2348 * Cortex-A8 target creation and initialization
2351 static int aarch64_init_target(struct command_context
*cmd_ctx
,
2352 struct target
*target
)
2354 /* examine_first() does a bunch of this */
2358 static int aarch64_init_arch_info(struct target
*target
,
2359 struct aarch64_common
*aarch64
, struct jtag_tap
*tap
)
2361 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2363 /* Setup struct aarch64_common */
2364 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
2365 /* tap has no dap initialized */
2367 tap
->dap
= dap_init();
2368 tap
->dap
->tap
= tap
;
2370 armv8
->arm
.dap
= tap
->dap
;
2372 /* register arch-specific functions */
2373 armv8
->examine_debug_reason
= NULL
;
2374 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
2375 armv8
->pre_restore_context
= NULL
;
2376 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
2378 armv8_init_arch_info(target
, armv8
);
2379 target_register_timer_callback(aarch64_handle_target_request
, 1, 1, target
);
2384 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
2386 struct aarch64_common
*aarch64
= calloc(1, sizeof(struct aarch64_common
));
2388 return aarch64_init_arch_info(target
, aarch64
, target
->tap
);
2391 static int aarch64_mmu(struct target
*target
, int *enabled
)
2393 if (target
->state
!= TARGET_HALTED
) {
2394 LOG_ERROR("%s: target %s not halted", __func__
, target_name(target
));
2395 return ERROR_TARGET_INVALID
;
2398 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
2402 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
2403 target_addr_t
*phys
)
2405 return armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
2408 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
2410 struct target
*target
= get_current_target(CMD_CTX
);
2411 struct armv8_common
*armv8
= target_to_armv8(target
);
2413 return armv8_handle_cache_info_command(CMD_CTX
,
2414 &armv8
->armv8_mmu
.armv8_cache
);
2418 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
2420 struct target
*target
= get_current_target(CMD_CTX
);
2421 if (!target_was_examined(target
)) {
2422 LOG_ERROR("target not examined yet");
2426 return aarch64_init_debug_access(target
);
2428 COMMAND_HANDLER(aarch64_handle_smp_off_command
)
2430 struct target
*target
= get_current_target(CMD_CTX
);
2431 /* check target is an smp target */
2432 struct target_list
*head
;
2433 struct target
*curr
;
2434 head
= target
->head
;
2436 if (head
!= (struct target_list
*)NULL
) {
2437 while (head
!= (struct target_list
*)NULL
) {
2438 curr
= head
->target
;
2442 /* fixes the target display to the debugger */
2443 target
->gdb_service
->target
= target
;
2448 COMMAND_HANDLER(aarch64_handle_smp_on_command
)
2450 struct target
*target
= get_current_target(CMD_CTX
);
2451 struct target_list
*head
;
2452 struct target
*curr
;
2453 head
= target
->head
;
2454 if (head
!= (struct target_list
*)NULL
) {
2456 while (head
!= (struct target_list
*)NULL
) {
2457 curr
= head
->target
;
2465 COMMAND_HANDLER(aarch64_mask_interrupts_command
)
2467 struct target
*target
= get_current_target(CMD_CTX
);
2468 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2470 static const Jim_Nvp nvp_maskisr_modes
[] = {
2471 { .name
= "off", .value
= AARCH64_ISRMASK_OFF
},
2472 { .name
= "on", .value
= AARCH64_ISRMASK_ON
},
2473 { .name
= NULL
, .value
= -1 },
2478 n
= Jim_Nvp_name2value_simple(nvp_maskisr_modes
, CMD_ARGV
[0]);
2479 if (n
->name
== NULL
) {
2480 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV
[0]);
2481 return ERROR_COMMAND_SYNTAX_ERROR
;
2484 aarch64
->isrmasking_mode
= n
->value
;
2487 n
= Jim_Nvp_value2name_simple(nvp_maskisr_modes
, aarch64
->isrmasking_mode
);
2488 command_print(CMD_CTX
, "aarch64 interrupt mask %s", n
->name
);
2493 static const struct command_registration aarch64_exec_command_handlers
[] = {
2495 .name
= "cache_info",
2496 .handler
= aarch64_handle_cache_info_command
,
2497 .mode
= COMMAND_EXEC
,
2498 .help
= "display information about target caches",
2503 .handler
= aarch64_handle_dbginit_command
,
2504 .mode
= COMMAND_EXEC
,
2505 .help
= "Initialize core debug",
2508 { .name
= "smp_off",
2509 .handler
= aarch64_handle_smp_off_command
,
2510 .mode
= COMMAND_EXEC
,
2511 .help
= "Stop smp handling",
2516 .handler
= aarch64_handle_smp_on_command
,
2517 .mode
= COMMAND_EXEC
,
2518 .help
= "Restart smp handling",
2523 .handler
= aarch64_mask_interrupts_command
,
2524 .mode
= COMMAND_ANY
,
2525 .help
= "mask aarch64 interrupts during single-step",
2526 .usage
= "['on'|'off']",
2529 COMMAND_REGISTRATION_DONE
2531 static const struct command_registration aarch64_command_handlers
[] = {
2533 .chain
= armv8_command_handlers
,
2537 .mode
= COMMAND_ANY
,
2538 .help
= "Aarch64 command group",
2540 .chain
= aarch64_exec_command_handlers
,
2542 COMMAND_REGISTRATION_DONE
2545 struct target_type aarch64_target
= {
2548 .poll
= aarch64_poll
,
2549 .arch_state
= armv8_arch_state
,
2551 .halt
= aarch64_halt
,
2552 .resume
= aarch64_resume
,
2553 .step
= aarch64_step
,
2555 .assert_reset
= aarch64_assert_reset
,
2556 .deassert_reset
= aarch64_deassert_reset
,
2558 /* REVISIT allow exporting VFP3 registers ... */
2559 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
2561 .read_memory
= aarch64_read_memory
,
2562 .write_memory
= aarch64_write_memory
,
2564 .add_breakpoint
= aarch64_add_breakpoint
,
2565 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
2566 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
2567 .remove_breakpoint
= aarch64_remove_breakpoint
,
2568 .add_watchpoint
= NULL
,
2569 .remove_watchpoint
= NULL
,
2571 .commands
= aarch64_command_handlers
,
2572 .target_create
= aarch64_target_create
,
2573 .init_target
= aarch64_init_target
,
2574 .examine
= aarch64_examine
,
2576 .read_phys_memory
= aarch64_read_phys_memory
,
2577 .write_phys_memory
= aarch64_write_phys_memory
,
2579 .virt2phys
= aarch64_virt2phys
,
Linking to existing account procedure
If you already have an account and want to add another login method
you
MUST first sign in with your existing account and
then change URL to read
https://review.openocd.org/login/?link
to get to this page again but this time it'll work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)