1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include "arm_semihosting.h"
32 #include "jtag/interface.h"
34 #include <helper/time_support.h>
46 struct aarch64_private_config
{
47 struct adiv5_private_config adiv5_config
;
51 static int aarch64_poll(struct target
*target
);
52 static int aarch64_debug_entry(struct target
*target
);
53 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
54 static int aarch64_set_breakpoint(struct target
*target
,
55 struct breakpoint
*breakpoint
, uint8_t matchmode
);
56 static int aarch64_set_context_breakpoint(struct target
*target
,
57 struct breakpoint
*breakpoint
, uint8_t matchmode
);
58 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
59 struct breakpoint
*breakpoint
);
60 static int aarch64_unset_breakpoint(struct target
*target
,
61 struct breakpoint
*breakpoint
);
62 static int aarch64_mmu(struct target
*target
, int *enabled
);
63 static int aarch64_virt2phys(struct target
*target
,
64 target_addr_t virt
, target_addr_t
*phys
);
65 static int aarch64_read_cpu_memory(struct target
*target
,
66 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
68 static int aarch64_restore_system_control_reg(struct target
*target
)
70 enum arm_mode target_mode
= ARM_MODE_ANY
;
71 int retval
= ERROR_OK
;
74 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
75 struct armv8_common
*armv8
= target_to_armv8(target
);
77 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
78 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
79 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
81 switch (armv8
->arm
.core_mode
) {
83 target_mode
= ARMV8_64_EL1H
;
87 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
91 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
95 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
103 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
107 LOG_INFO("cannot read system control register in this mode");
111 if (target_mode
!= ARM_MODE_ANY
)
112 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
114 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
, aarch64
->system_control_reg
);
115 if (retval
!= ERROR_OK
)
118 if (target_mode
!= ARM_MODE_ANY
)
119 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
125 /* modify system_control_reg in order to enable or disable mmu for :
126 * - virt2phys address conversion
127 * - read or write memory in phys or virt address */
128 static int aarch64_mmu_modify(struct target
*target
, int enable
)
130 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
131 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
132 int retval
= ERROR_OK
;
136 /* if mmu enabled at target stop and mmu not enable */
137 if (!(aarch64
->system_control_reg
& 0x1U
)) {
138 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
141 if (!(aarch64
->system_control_reg_curr
& 0x1U
))
142 aarch64
->system_control_reg_curr
|= 0x1U
;
144 if (aarch64
->system_control_reg_curr
& 0x4U
) {
145 /* data cache is active */
146 aarch64
->system_control_reg_curr
&= ~0x4U
;
147 /* flush data cache armv8 function to be called */
148 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
149 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
151 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
152 aarch64
->system_control_reg_curr
&= ~0x1U
;
156 switch (armv8
->arm
.core_mode
) {
160 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
164 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
168 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
176 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
180 LOG_DEBUG("unknown cpu state 0x%" PRIx32
, armv8
->arm
.core_mode
);
184 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
,
185 aarch64
->system_control_reg_curr
);
190 * Basic debug access, very low level assumes state is saved
192 static int aarch64_init_debug_access(struct target
*target
)
194 struct armv8_common
*armv8
= target_to_armv8(target
);
198 LOG_DEBUG("%s", target_name(target
));
200 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
201 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
202 if (retval
!= ERROR_OK
) {
203 LOG_DEBUG("Examine %s failed", "oslock");
207 /* Clear Sticky Power Down status Bit in PRSR to enable access to
208 the registers in the Core Power Domain */
209 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
210 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
211 if (retval
!= ERROR_OK
)
215 * Static CTI configuration:
216 * Channel 0 -> trigger outputs HALT request to PE
217 * Channel 1 -> trigger outputs Resume request to PE
218 * Gate all channel trigger events from entering the CTM
222 retval
= arm_cti_enable(armv8
->cti
, true);
223 /* By default, gate all channel events to and from the CTM */
224 if (retval
== ERROR_OK
)
225 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
226 /* output halt requests to PE on channel 0 event */
227 if (retval
== ERROR_OK
)
228 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN0
, CTI_CHNL(0));
229 /* output restart requests to PE on channel 1 event */
230 if (retval
== ERROR_OK
)
231 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN1
, CTI_CHNL(1));
232 if (retval
!= ERROR_OK
)
235 /* Resync breakpoint registers */
240 /* Write to memory mapped registers directly with no cache or mmu handling */
241 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
246 struct armv8_common
*armv8
= target_to_armv8(target
);
248 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
253 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint64_t debug
)
255 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
258 dpm
->arm
= &a8
->armv8_common
.arm
;
261 retval
= armv8_dpm_setup(dpm
);
262 if (retval
== ERROR_OK
)
263 retval
= armv8_dpm_initialize(dpm
);
268 static int aarch64_set_dscr_bits(struct target
*target
, unsigned long bit_mask
, unsigned long value
)
270 struct armv8_common
*armv8
= target_to_armv8(target
);
271 return armv8_set_dbgreg_bits(armv8
, CPUV8_DBG_DSCR
, bit_mask
, value
);
274 static int aarch64_check_state_one(struct target
*target
,
275 uint32_t mask
, uint32_t val
, int *p_result
, uint32_t *p_prsr
)
277 struct armv8_common
*armv8
= target_to_armv8(target
);
281 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
282 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &prsr
);
283 if (retval
!= ERROR_OK
)
290 *p_result
= (prsr
& mask
) == (val
& mask
);
295 static int aarch64_wait_halt_one(struct target
*target
)
297 int retval
= ERROR_OK
;
300 int64_t then
= timeval_ms();
304 retval
= aarch64_check_state_one(target
, PRSR_HALT
, PRSR_HALT
, &halted
, &prsr
);
305 if (retval
!= ERROR_OK
|| halted
)
308 if (timeval_ms() > then
+ 1000) {
309 retval
= ERROR_TARGET_TIMEOUT
;
310 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32
, target_name(target
), prsr
);
317 static int aarch64_prepare_halt_smp(struct target
*target
, bool exc_target
, struct target
**p_first
)
319 int retval
= ERROR_OK
;
320 struct target_list
*head
= target
->head
;
321 struct target
*first
= NULL
;
323 LOG_DEBUG("target %s exc %i", target_name(target
), exc_target
);
325 while (head
!= NULL
) {
326 struct target
*curr
= head
->target
;
327 struct armv8_common
*armv8
= target_to_armv8(curr
);
330 if (exc_target
&& curr
== target
)
332 if (!target_was_examined(curr
))
334 if (curr
->state
!= TARGET_RUNNING
)
337 /* HACK: mark this target as prepared for halting */
338 curr
->debug_reason
= DBG_REASON_DBGRQ
;
340 /* open the gate for channel 0 to let HALT requests pass to the CTM */
341 retval
= arm_cti_ungate_channel(armv8
->cti
, 0);
342 if (retval
== ERROR_OK
)
343 retval
= aarch64_set_dscr_bits(curr
, DSCR_HDE
, DSCR_HDE
);
344 if (retval
!= ERROR_OK
)
347 LOG_DEBUG("target %s prepared", target_name(curr
));
354 if (exc_target
&& first
)
363 static int aarch64_halt_one(struct target
*target
, enum halt_mode mode
)
365 int retval
= ERROR_OK
;
366 struct armv8_common
*armv8
= target_to_armv8(target
);
368 LOG_DEBUG("%s", target_name(target
));
370 /* allow Halting Debug Mode */
371 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
372 if (retval
!= ERROR_OK
)
375 /* trigger an event on channel 0, this outputs a halt request to the PE */
376 retval
= arm_cti_pulse_channel(armv8
->cti
, 0);
377 if (retval
!= ERROR_OK
)
380 if (mode
== HALT_SYNC
) {
381 retval
= aarch64_wait_halt_one(target
);
382 if (retval
!= ERROR_OK
) {
383 if (retval
== ERROR_TARGET_TIMEOUT
)
384 LOG_ERROR("Timeout waiting for target %s halt", target_name(target
));
392 static int aarch64_halt_smp(struct target
*target
, bool exc_target
)
394 struct target
*next
= target
;
397 /* prepare halt on all PEs of the group */
398 retval
= aarch64_prepare_halt_smp(target
, exc_target
, &next
);
400 if (exc_target
&& next
== target
)
403 /* halt the target PE */
404 if (retval
== ERROR_OK
)
405 retval
= aarch64_halt_one(next
, HALT_LAZY
);
407 if (retval
!= ERROR_OK
)
410 /* wait for all PEs to halt */
411 int64_t then
= timeval_ms();
413 bool all_halted
= true;
414 struct target_list
*head
;
417 foreach_smp_target(head
, target
->head
) {
422 if (!target_was_examined(curr
))
425 retval
= aarch64_check_state_one(curr
, PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
426 if (retval
!= ERROR_OK
|| !halted
) {
435 if (timeval_ms() > then
+ 1000) {
436 retval
= ERROR_TARGET_TIMEOUT
;
441 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
442 * and it looks like the CTI's are not connected by a common
443 * trigger matrix. It seems that we need to halt one core in each
444 * cluster explicitly. So if we find that a core has not halted
445 * yet, we trigger an explicit halt for the second cluster.
447 retval
= aarch64_halt_one(curr
, HALT_LAZY
);
448 if (retval
!= ERROR_OK
)
455 static int update_halt_gdb(struct target
*target
, enum target_debug_reason debug_reason
)
457 struct target
*gdb_target
= NULL
;
458 struct target_list
*head
;
461 if (debug_reason
== DBG_REASON_NOTHALTED
) {
462 LOG_DEBUG("Halting remaining targets in SMP group");
463 aarch64_halt_smp(target
, true);
466 /* poll all targets in the group, but skip the target that serves GDB */
467 foreach_smp_target(head
, target
->head
) {
469 /* skip calling context */
472 if (!target_was_examined(curr
))
474 /* skip targets that were already halted */
475 if (curr
->state
== TARGET_HALTED
)
477 /* remember the gdb_service->target */
478 if (curr
->gdb_service
!= NULL
)
479 gdb_target
= curr
->gdb_service
->target
;
481 if (curr
== gdb_target
)
484 /* avoid recursion in aarch64_poll() */
490 /* after all targets were updated, poll the gdb serving target */
491 if (gdb_target
!= NULL
&& gdb_target
!= target
)
492 aarch64_poll(gdb_target
);
498 * Aarch64 Run control
501 static int aarch64_poll(struct target
*target
)
503 enum target_state prev_target_state
;
504 int retval
= ERROR_OK
;
507 retval
= aarch64_check_state_one(target
,
508 PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
509 if (retval
!= ERROR_OK
)
513 prev_target_state
= target
->state
;
514 if (prev_target_state
!= TARGET_HALTED
) {
515 enum target_debug_reason debug_reason
= target
->debug_reason
;
517 /* We have a halting debug event */
518 target
->state
= TARGET_HALTED
;
519 LOG_DEBUG("Target %s halted", target_name(target
));
520 retval
= aarch64_debug_entry(target
);
521 if (retval
!= ERROR_OK
)
525 update_halt_gdb(target
, debug_reason
);
527 if (arm_semihosting(target
, &retval
) != 0)
530 switch (prev_target_state
) {
534 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
536 case TARGET_DEBUG_RUNNING
:
537 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_HALTED
);
544 target
->state
= TARGET_RUNNING
;
549 static int aarch64_halt(struct target
*target
)
551 struct armv8_common
*armv8
= target_to_armv8(target
);
552 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_HALT
;
555 return aarch64_halt_smp(target
, false);
557 return aarch64_halt_one(target
, HALT_SYNC
);
560 static int aarch64_restore_one(struct target
*target
, int current
,
561 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
563 struct armv8_common
*armv8
= target_to_armv8(target
);
564 struct arm
*arm
= &armv8
->arm
;
568 LOG_DEBUG("%s", target_name(target
));
570 if (!debug_execution
)
571 target_free_all_working_areas(target
);
573 /* current = 1: continue on current pc, otherwise continue at <address> */
574 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
576 resume_pc
= *address
;
578 *address
= resume_pc
;
580 /* Make sure that the Armv7 gdb thumb fixups does not
581 * kill the return address
583 switch (arm
->core_state
) {
585 resume_pc
&= 0xFFFFFFFC;
587 case ARM_STATE_AARCH64
:
588 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
590 case ARM_STATE_THUMB
:
591 case ARM_STATE_THUMB_EE
:
592 /* When the return address is loaded into PC
593 * bit 0 must be 1 to stay in Thumb state
597 case ARM_STATE_JAZELLE
:
598 LOG_ERROR("How do I resume into Jazelle state??");
601 LOG_DEBUG("resume pc = 0x%016" PRIx64
, resume_pc
);
602 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
603 arm
->pc
->dirty
= true;
604 arm
->pc
->valid
= true;
606 /* called it now before restoring context because it uses cpu
607 * register r0 for restoring system control register */
608 retval
= aarch64_restore_system_control_reg(target
);
609 if (retval
== ERROR_OK
)
610 retval
= aarch64_restore_context(target
, handle_breakpoints
);
616 * prepare single target for restart
620 static int aarch64_prepare_restart_one(struct target
*target
)
622 struct armv8_common
*armv8
= target_to_armv8(target
);
627 LOG_DEBUG("%s", target_name(target
));
629 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
630 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
631 if (retval
!= ERROR_OK
)
634 if ((dscr
& DSCR_ITE
) == 0)
635 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
636 if ((dscr
& DSCR_ERR
) != 0)
637 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
639 /* acknowledge a pending CTI halt event */
640 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
642 * open the CTI gate for channel 1 so that the restart events
643 * get passed along to all PEs. Also close gate for channel 0
644 * to isolate the PE from halt events.
646 if (retval
== ERROR_OK
)
647 retval
= arm_cti_ungate_channel(armv8
->cti
, 1);
648 if (retval
== ERROR_OK
)
649 retval
= arm_cti_gate_channel(armv8
->cti
, 0);
651 /* make sure that DSCR.HDE is set */
652 if (retval
== ERROR_OK
) {
654 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
655 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
658 if (retval
== ERROR_OK
) {
659 /* clear sticky bits in PRSR, SDR is now 0 */
660 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
661 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &tmp
);
667 static int aarch64_do_restart_one(struct target
*target
, enum restart_mode mode
)
669 struct armv8_common
*armv8
= target_to_armv8(target
);
672 LOG_DEBUG("%s", target_name(target
));
674 /* trigger an event on channel 1, generates a restart request to the PE */
675 retval
= arm_cti_pulse_channel(armv8
->cti
, 1);
676 if (retval
!= ERROR_OK
)
679 if (mode
== RESTART_SYNC
) {
680 int64_t then
= timeval_ms();
684 * if PRSR.SDR is set now, the target did restart, even
685 * if it's now already halted again (e.g. due to breakpoint)
687 retval
= aarch64_check_state_one(target
,
688 PRSR_SDR
, PRSR_SDR
, &resumed
, NULL
);
689 if (retval
!= ERROR_OK
|| resumed
)
692 if (timeval_ms() > then
+ 1000) {
693 LOG_ERROR("%s: Timeout waiting for resume"PRIx32
, target_name(target
));
694 retval
= ERROR_TARGET_TIMEOUT
;
700 if (retval
!= ERROR_OK
)
703 target
->debug_reason
= DBG_REASON_NOTHALTED
;
704 target
->state
= TARGET_RUNNING
;
709 static int aarch64_restart_one(struct target
*target
, enum restart_mode mode
)
713 LOG_DEBUG("%s", target_name(target
));
715 retval
= aarch64_prepare_restart_one(target
);
716 if (retval
== ERROR_OK
)
717 retval
= aarch64_do_restart_one(target
, mode
);
723 * prepare all but the current target for restart
725 static int aarch64_prep_restart_smp(struct target
*target
, int handle_breakpoints
, struct target
**p_first
)
727 int retval
= ERROR_OK
;
728 struct target_list
*head
;
729 struct target
*first
= NULL
;
732 foreach_smp_target(head
, target
->head
) {
733 struct target
*curr
= head
->target
;
735 /* skip calling target */
738 if (!target_was_examined(curr
))
740 if (curr
->state
!= TARGET_HALTED
)
743 /* resume at current address, not in step mode */
744 retval
= aarch64_restore_one(curr
, 1, &address
, handle_breakpoints
, 0);
745 if (retval
== ERROR_OK
)
746 retval
= aarch64_prepare_restart_one(curr
);
747 if (retval
!= ERROR_OK
) {
748 LOG_ERROR("failed to restore target %s", target_name(curr
));
751 /* remember the first valid target in the group */
763 static int aarch64_step_restart_smp(struct target
*target
)
765 int retval
= ERROR_OK
;
766 struct target_list
*head
;
767 struct target
*first
= NULL
;
769 LOG_DEBUG("%s", target_name(target
));
771 retval
= aarch64_prep_restart_smp(target
, 0, &first
);
772 if (retval
!= ERROR_OK
)
776 retval
= aarch64_do_restart_one(first
, RESTART_LAZY
);
777 if (retval
!= ERROR_OK
) {
778 LOG_DEBUG("error restarting target %s", target_name(first
));
782 int64_t then
= timeval_ms();
784 struct target
*curr
= target
;
785 bool all_resumed
= true;
787 foreach_smp_target(head
, target
->head
) {
796 if (!target_was_examined(curr
))
799 retval
= aarch64_check_state_one(curr
,
800 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
801 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
806 if (curr
->state
!= TARGET_RUNNING
) {
807 curr
->state
= TARGET_RUNNING
;
808 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
809 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
816 if (timeval_ms() > then
+ 1000) {
817 LOG_ERROR("%s: timeout waiting for target resume", __func__
);
818 retval
= ERROR_TARGET_TIMEOUT
;
822 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
823 * and it looks like the CTI's are not connected by a common
824 * trigger matrix. It seems that we need to halt one core in each
825 * cluster explicitly. So if we find that a core has not halted
826 * yet, we trigger an explicit resume for the second cluster.
828 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
829 if (retval
!= ERROR_OK
)
836 static int aarch64_resume(struct target
*target
, int current
,
837 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
840 uint64_t addr
= address
;
842 struct armv8_common
*armv8
= target_to_armv8(target
);
843 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_RESUME
;
845 if (target
->state
!= TARGET_HALTED
)
846 return ERROR_TARGET_NOT_HALTED
;
849 * If this target is part of a SMP group, prepare the others
850 * targets for resuming. This involves restoring the complete
851 * target register context and setting up CTI gates to accept
852 * resume events from the trigger matrix.
855 retval
= aarch64_prep_restart_smp(target
, handle_breakpoints
, NULL
);
856 if (retval
!= ERROR_OK
)
860 /* all targets prepared, restore and restart the current target */
861 retval
= aarch64_restore_one(target
, current
, &addr
, handle_breakpoints
,
863 if (retval
== ERROR_OK
)
864 retval
= aarch64_restart_one(target
, RESTART_SYNC
);
865 if (retval
!= ERROR_OK
)
869 int64_t then
= timeval_ms();
871 struct target
*curr
= target
;
872 struct target_list
*head
;
873 bool all_resumed
= true;
875 foreach_smp_target(head
, target
->head
) {
882 if (!target_was_examined(curr
))
885 retval
= aarch64_check_state_one(curr
,
886 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
887 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
892 if (curr
->state
!= TARGET_RUNNING
) {
893 curr
->state
= TARGET_RUNNING
;
894 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
895 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
902 if (timeval_ms() > then
+ 1000) {
903 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__
, target_name(curr
));
904 retval
= ERROR_TARGET_TIMEOUT
;
909 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
910 * and it looks like the CTI's are not connected by a common
911 * trigger matrix. It seems that we need to halt one core in each
912 * cluster explicitly. So if we find that a core has not halted
913 * yet, we trigger an explicit resume for the second cluster.
915 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
916 if (retval
!= ERROR_OK
)
921 if (retval
!= ERROR_OK
)
924 target
->debug_reason
= DBG_REASON_NOTHALTED
;
926 if (!debug_execution
) {
927 target
->state
= TARGET_RUNNING
;
928 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
929 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
931 target
->state
= TARGET_DEBUG_RUNNING
;
932 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
933 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
939 static int aarch64_debug_entry(struct target
*target
)
941 int retval
= ERROR_OK
;
942 struct armv8_common
*armv8
= target_to_armv8(target
);
943 struct arm_dpm
*dpm
= &armv8
->dpm
;
944 enum arm_state core_state
;
947 /* make sure to clear all sticky errors */
948 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
949 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
950 if (retval
== ERROR_OK
)
951 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
952 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
953 if (retval
== ERROR_OK
)
954 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
956 if (retval
!= ERROR_OK
)
959 LOG_DEBUG("%s dscr = 0x%08" PRIx32
, target_name(target
), dscr
);
962 core_state
= armv8_dpm_get_core_state(dpm
);
963 armv8_select_opcodes(armv8
, core_state
== ARM_STATE_AARCH64
);
964 armv8_select_reg_access(armv8
, core_state
== ARM_STATE_AARCH64
);
966 /* close the CTI gate for all events */
967 if (retval
== ERROR_OK
)
968 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
969 /* discard async exceptions */
970 if (retval
== ERROR_OK
)
971 retval
= dpm
->instr_cpsr_sync(dpm
);
972 if (retval
!= ERROR_OK
)
975 /* Examine debug reason */
976 armv8_dpm_report_dscr(dpm
, dscr
);
978 /* save address of instruction that triggered the watchpoint? */
979 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
983 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
984 armv8
->debug_base
+ CPUV8_DBG_WFAR1
,
986 if (retval
!= ERROR_OK
)
990 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
991 armv8
->debug_base
+ CPUV8_DBG_WFAR0
,
993 if (retval
!= ERROR_OK
)
996 armv8_dpm_report_wfar(&armv8
->dpm
, wfar
);
999 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
1001 if (retval
== ERROR_OK
&& armv8
->post_debug_entry
)
1002 retval
= armv8
->post_debug_entry(target
);
1007 static int aarch64_post_debug_entry(struct target
*target
)
1009 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1010 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1012 enum arm_mode target_mode
= ARM_MODE_ANY
;
1015 switch (armv8
->arm
.core_mode
) {
1017 target_mode
= ARMV8_64_EL1H
;
1021 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL1
, 0);
1025 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL2
, 0);
1029 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL3
, 0);
1037 instr
= ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1041 LOG_INFO("cannot read system control register in this mode");
1045 if (target_mode
!= ARM_MODE_ANY
)
1046 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
1048 retval
= armv8
->dpm
.instr_read_data_r0(&armv8
->dpm
, instr
, &aarch64
->system_control_reg
);
1049 if (retval
!= ERROR_OK
)
1052 if (target_mode
!= ARM_MODE_ANY
)
1053 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
1055 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
1056 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
1058 if (armv8
->armv8_mmu
.armv8_cache
.info
== -1) {
1059 armv8_identify_cache(armv8
);
1060 armv8_read_mpidr(armv8
);
1063 armv8
->armv8_mmu
.mmu_enabled
=
1064 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
1065 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
1066 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
1067 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
1068 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
1073 * single-step a target
1075 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
1076 int handle_breakpoints
)
1078 struct armv8_common
*armv8
= target_to_armv8(target
);
1079 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1080 int saved_retval
= ERROR_OK
;
1084 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_STEP
;
1086 if (target
->state
!= TARGET_HALTED
) {
1087 LOG_WARNING("target not halted");
1088 return ERROR_TARGET_NOT_HALTED
;
1091 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1092 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
1093 /* make sure EDECR.SS is not set when restoring the register */
1095 if (retval
== ERROR_OK
) {
1097 /* set EDECR.SS to enter hardware step mode */
1098 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1099 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (edecr
|0x4));
1101 /* disable interrupts while stepping */
1102 if (retval
== ERROR_OK
&& aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
)
1103 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0x3 << 22);
1104 /* bail out if stepping setup has failed */
1105 if (retval
!= ERROR_OK
)
1108 if (target
->smp
&& (current
== 1)) {
1110 * isolate current target so that it doesn't get resumed
1111 * together with the others
1113 retval
= arm_cti_gate_channel(armv8
->cti
, 1);
1114 /* resume all other targets in the group */
1115 if (retval
== ERROR_OK
)
1116 retval
= aarch64_step_restart_smp(target
);
1117 if (retval
!= ERROR_OK
) {
1118 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1121 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1124 /* all other targets running, restore and restart the current target */
1125 retval
= aarch64_restore_one(target
, current
, &address
, 0, 0);
1126 if (retval
== ERROR_OK
)
1127 retval
= aarch64_restart_one(target
, RESTART_LAZY
);
1129 if (retval
!= ERROR_OK
)
1132 LOG_DEBUG("target step-resumed at 0x%" PRIx64
, address
);
1133 if (!handle_breakpoints
)
1134 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1136 int64_t then
= timeval_ms();
1141 retval
= aarch64_check_state_one(target
,
1142 PRSR_SDR
|PRSR_HALT
, PRSR_SDR
|PRSR_HALT
, &stepped
, &prsr
);
1143 if (retval
!= ERROR_OK
|| stepped
)
1146 if (timeval_ms() > then
+ 100) {
1147 LOG_ERROR("timeout waiting for target %s halt after step",
1148 target_name(target
));
1149 retval
= ERROR_TARGET_TIMEOUT
;
1155 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1156 * causes a timeout. The core takes the step but doesn't complete it and so
1157 * debug state is never entered. However, you can manually halt the core
1158 * as an external debug even is also a WFI wakeup event.
1160 if (retval
== ERROR_TARGET_TIMEOUT
)
1161 saved_retval
= aarch64_halt_one(target
, HALT_SYNC
);
1164 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1165 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
1166 if (retval
!= ERROR_OK
)
1169 /* restore interrupts */
1170 if (aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
) {
1171 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0);
1172 if (retval
!= ERROR_OK
)
1176 if (saved_retval
!= ERROR_OK
)
1177 return saved_retval
;
1182 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
1184 struct armv8_common
*armv8
= target_to_armv8(target
);
1185 struct arm
*arm
= &armv8
->arm
;
1189 LOG_DEBUG("%s", target_name(target
));
1191 if (armv8
->pre_restore_context
)
1192 armv8
->pre_restore_context(target
);
1194 retval
= armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
1195 if (retval
== ERROR_OK
) {
1196 /* registers are now invalid */
1197 register_cache_invalidate(arm
->core_cache
);
1198 register_cache_invalidate(arm
->core_cache
->next
);
1205 * Cortex-A8 Breakpoint and watchpoint functions
1208 /* Setup hardware Breakpoint Register Pair */
1209 static int aarch64_set_breakpoint(struct target
*target
,
1210 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1215 uint8_t byte_addr_select
= 0x0F;
1216 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1217 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1218 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1220 if (breakpoint
->set
) {
1221 LOG_WARNING("breakpoint already set");
1225 if (breakpoint
->type
== BKPT_HARD
) {
1227 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
1229 if (brp_i
>= aarch64
->brp_num
) {
1230 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1231 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1233 breakpoint
->set
= brp_i
+ 1;
1234 if (breakpoint
->length
== 2)
1235 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1236 control
= ((matchmode
& 0x7) << 20)
1238 | (byte_addr_select
<< 5)
1240 brp_list
[brp_i
].used
= 1;
1241 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1242 brp_list
[brp_i
].control
= control
;
1243 bpt_value
= brp_list
[brp_i
].value
;
1245 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1246 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1247 (uint32_t)(bpt_value
& 0xFFFFFFFF));
1248 if (retval
!= ERROR_OK
)
1250 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1251 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1252 (uint32_t)(bpt_value
>> 32));
1253 if (retval
!= ERROR_OK
)
1256 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1257 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1258 brp_list
[brp_i
].control
);
1259 if (retval
!= ERROR_OK
)
1261 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1262 brp_list
[brp_i
].control
,
1263 brp_list
[brp_i
].value
);
1265 } else if (breakpoint
->type
== BKPT_SOFT
) {
1269 if (armv8_dpm_get_core_state(&armv8
->dpm
) == ARM_STATE_AARCH64
) {
1270 opcode
= ARMV8_HLT(11);
1272 if (breakpoint
->length
!= 4)
1273 LOG_ERROR("bug: breakpoint length should be 4 in AArch64 mode");
1276 * core_state is ARM_STATE_ARM
1277 * in that case the opcode depends on breakpoint length:
1278 * - if length == 4 => A32 opcode
1279 * - if length == 2 => T32 opcode
1280 * - if length == 3 => T32 opcode (refer to gdb doc : ARM-Breakpoint-Kinds)
1281 * in that case the length should be changed from 3 to 4 bytes
1283 opcode
= (breakpoint
->length
== 4) ? ARMV8_HLT_A1(11) :
1284 (uint32_t) (ARMV8_HLT_T1(11) | ARMV8_HLT_T1(11) << 16);
1286 if (breakpoint
->length
== 3)
1287 breakpoint
->length
= 4;
1290 buf_set_u32(code
, 0, 32, opcode
);
1292 retval
= target_read_memory(target
,
1293 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1294 breakpoint
->length
, 1,
1295 breakpoint
->orig_instr
);
1296 if (retval
!= ERROR_OK
)
1299 armv8_cache_d_inner_flush_virt(armv8
,
1300 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1301 breakpoint
->length
);
1303 retval
= target_write_memory(target
,
1304 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1305 breakpoint
->length
, 1, code
);
1306 if (retval
!= ERROR_OK
)
1309 armv8_cache_d_inner_flush_virt(armv8
,
1310 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1311 breakpoint
->length
);
1313 armv8_cache_i_inner_inval_virt(armv8
,
1314 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1315 breakpoint
->length
);
1317 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1320 /* Ensure that halting debug mode is enable */
1321 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
1322 if (retval
!= ERROR_OK
) {
1323 LOG_DEBUG("Failed to set DSCR.HDE");
/*
 * Program a context-ID (ASID-matching) hardware breakpoint.
 *
 * Finds a free BRP of type BRP_CONTEXT, loads the breakpoint's ASID into
 * its value register (DBGBVR) and the match configuration into its control
 * register (DBGBCR).  matchmode selects the BCR BT (breakpoint type) field.
 * Returns ERROR_OK on success, ERROR_FAIL if no context BRP is free or the
 * breakpoint is already set, or the error from the register write.
 */
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval = ERROR_FAIL;
	int brp_i = 0;
	uint32_t control;
	/* BAS = 0x0F: match all four byte lanes of the address */
	uint8_t byte_addr_select = 0x0F;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/*check available context BRPs*/
	while ((brp_list[brp_i].used ||
		(brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
		brp_i++;

	if (brp_i >= aarch64->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	/* remember which BRP this breakpoint occupies (index + 1 so 0 == unset) */
	breakpoint->set = brp_i + 1;
	/* BCR: BT = matchmode, HMC = 1, BAS, PMC = EL1/EL0, E = enabled */
	control = ((matchmode & 0x7) << 20)
		| (1 << 13)
		| (byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_i].used = 1;
	brp_list[brp_i].value = (breakpoint->asid);
	brp_list[brp_i].control = control;
	/* write DBGBVR first, then enable via DBGBCR */
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
			brp_list[brp_i].value);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
			brp_list[brp_i].control);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
		brp_list[brp_i].control,
		brp_list[brp_i].value);

	return ERROR_OK;
}
1380 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1382 int retval
= ERROR_FAIL
;
1383 int brp_1
= 0; /* holds the contextID pair */
1384 int brp_2
= 0; /* holds the IVA pair */
1385 uint32_t control_CTX
, control_IVA
;
1386 uint8_t CTX_byte_addr_select
= 0x0F;
1387 uint8_t IVA_byte_addr_select
= 0x0F;
1388 uint8_t CTX_machmode
= 0x03;
1389 uint8_t IVA_machmode
= 0x01;
1390 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1391 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1392 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1394 if (breakpoint
->set
) {
1395 LOG_WARNING("breakpoint already set");
1398 /*check available context BRPs*/
1399 while ((brp_list
[brp_1
].used
||
1400 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
1403 printf("brp(CTX) found num: %d\n", brp_1
);
1404 if (brp_1
>= aarch64
->brp_num
) {
1405 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1409 while ((brp_list
[brp_2
].used
||
1410 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
1413 printf("brp(IVA) found num: %d\n", brp_2
);
1414 if (brp_2
>= aarch64
->brp_num
) {
1415 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1419 breakpoint
->set
= brp_1
+ 1;
1420 breakpoint
->linked_BRP
= brp_2
;
1421 control_CTX
= ((CTX_machmode
& 0x7) << 20)
1424 | (CTX_byte_addr_select
<< 5)
1426 brp_list
[brp_1
].used
= 1;
1427 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1428 brp_list
[brp_1
].control
= control_CTX
;
1429 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1430 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1431 brp_list
[brp_1
].value
);
1432 if (retval
!= ERROR_OK
)
1434 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1435 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1436 brp_list
[brp_1
].control
);
1437 if (retval
!= ERROR_OK
)
1440 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1443 | (IVA_byte_addr_select
<< 5)
1445 brp_list
[brp_2
].used
= 1;
1446 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1447 brp_list
[brp_2
].control
= control_IVA
;
1448 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1449 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1450 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1451 if (retval
!= ERROR_OK
)
1453 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1454 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].BRPn
,
1455 brp_list
[brp_2
].value
>> 32);
1456 if (retval
!= ERROR_OK
)
1458 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1459 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1460 brp_list
[brp_2
].control
);
1461 if (retval
!= ERROR_OK
)
/*
 * Remove a previously programmed breakpoint from the core.
 *
 * Hardware breakpoints: disable and clear the owning BRP (and the linked
 * BRP for hybrid context+address breakpoints) by zeroing DBGBCR/DBGBVR.
 * Software breakpoints: write back the saved original instruction and
 * maintain D/I-cache coherency around the patched location.
 * Returns ERROR_OK on success or the error from a memory/register write.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* address != 0 && asid != 0 identifies a hybrid breakpoint,
		 * which occupies two linked BRPs */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;	/* context pair */
			int brp_j = breakpoint->linked_BRP;	/* address pair */
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* disable first (BCR), then clear both value halves (BVR) */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;
		} else {
			/* plain address-match hardware breakpoint: single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* flush the restored instruction from D-cache and invalidate
		 * I-cache so the core refetches the original opcode */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1600 static int aarch64_add_breakpoint(struct target
*target
,
1601 struct breakpoint
*breakpoint
)
1603 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1605 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1606 LOG_INFO("no hardware breakpoint available");
1607 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1610 if (breakpoint
->type
== BKPT_HARD
)
1611 aarch64
->brp_num_available
--;
1613 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1616 static int aarch64_add_context_breakpoint(struct target
*target
,
1617 struct breakpoint
*breakpoint
)
1619 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1621 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1622 LOG_INFO("no hardware breakpoint available");
1623 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1626 if (breakpoint
->type
== BKPT_HARD
)
1627 aarch64
->brp_num_available
--;
1629 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1632 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1633 struct breakpoint
*breakpoint
)
1635 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1637 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1638 LOG_INFO("no hardware breakpoint available");
1639 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1642 if (breakpoint
->type
== BKPT_HARD
)
1643 aarch64
->brp_num_available
--;
1645 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
/*
 * target_type breakpoint hook: remove a breakpoint and return its BRP
 * (if hardware) to the available pool.
 *
 * NOTE(review): the halted check appears to be compiled out (the comment
 * above it says removal while running is fine) — confirm against the
 * unmangled file whether the #if 0 guard is present.
 */
static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

#if 0
/* It is perfectly possible to remove breakpoints while the target is running */
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}
#endif

	if (breakpoint->set) {
		aarch64_unset_breakpoint(target, breakpoint);
		if (breakpoint->type == BKPT_HARD)
			aarch64->brp_num_available++;
	}

	return ERROR_OK;
}
1671 * Cortex-A8 Reset functions
/*
 * Assert reset on the target: run the user's reset-assert event handler if
 * one is registered, otherwise pulse SRST through the adapter.  Invalidates
 * the cached register state since the core contents are no longer valid.
 */
static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);

	/* FIXME when halt is requested, make it work somehow... */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		adapter_assert_reset();
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target)) {
		register_cache_invalidate(armv8->arm.core_cache);
		/* also the secondary (AArch32 view) cache */
		register_cache_invalidate(armv8->arm.core_cache->next);
	}

	target->state = TARGET_RESET;

	return ERROR_OK;
}
/*
 * Deassert reset: release SRST, re-establish debug access, and if
 * reset-halt was requested but the core is already running, halt it.
 */
static int aarch64_deassert_reset(struct target *target)
{
	int retval;

	/* be certain SRST is off */
	adapter_deassert_reset();

	/* nothing more to do for a target never examined */
	if (!target_was_examined(target))
		return ERROR_OK;

	retval = aarch64_poll(target);
	if (retval != ERROR_OK)
		return retval;

	retval = aarch64_init_debug_access(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
				target_name(target));
			retval = target_halt(target);
		}
	}

	return retval;
}
/*
 * Write memory through the CPU, one element at a time (handles any size
 * and alignment).  Each element is pushed through the DCC: the host writes
 * DTRRX, the core moves it to X1/R1 and executes a store through the
 * address register.  X1/R1 is clobbered and marked dirty so it is restored
 * on resume.  *dscr tracks the cached DSCR value across helpers.
 */
static int aarch64_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode if necessary */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t data, opcode;

		/* write the data to store into DTRRX */
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* move DTRRX into X1 (AArch64) or R1 (AArch32) */
		if (arm->core_state == ARM_STATE_AARCH64)
			retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		/* post-indexed store of the appropriate width */
		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
/*
 * Fast path for word-aligned 32-bit writes: switch the DCC to memory-access
 * mode so every write to DTRRX is stored by the core and auto-increments
 * the address register, then stream the whole buffer in one burst.
 * X1/R1 is clobbered and marked dirty.  *dscr tracks the cached DSCR.
 */
static int aarch64_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;

	armv8_reg_current(arm, 1)->dirty = true;

	/* Step 1.d - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 2.a - Do the write */
	retval = mem_ap_write_buf_noincr(armv8->debug_ap,
			buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.a - Switch DTR mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
/*
 * Write target memory through the CPU (APB-AP + DCC), dispatching to the
 * fast streaming path for aligned word writes and the slow per-element
 * path otherwise.  Requires a halted core; clobbers X0/R0 (address
 * register), which is marked dirty for restore on resume.  Sticky DSCR
 * abort flags are checked after the transfer.
 */
static int aarch64_write_cpu_memory(struct target *target,
	uint64_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* write memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRX */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	if (size == 4 && (address % 4) == 0)
		retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);

	if (retval != ERROR_OK) {
		/* Unset DTR mode */
		mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
/*
 * Read memory through the CPU, one element at a time (handles any size
 * and alignment).  For each element the core executes a post-indexed load
 * into X1/R1, moves it to the DCC transmit register, and the host reads it
 * from DTRTX.  X1/R1 is clobbered and marked dirty.  *dscr tracks the
 * cached DSCR value across helpers.
 */
static int aarch64_read_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode (if necessary) */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t opcode, data;

		/* post-indexed load of the appropriate width into X1/R1 */
		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		/* move X1/R1 into the DCC transmit register */
		if (arm->core_state == ARM_STATE_AARCH64)
			retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &data);
		if (retval != ERROR_OK)
			return retval;

		/* store in target endianness */
		if (size == 1)
			*buffer = (uint8_t)data;
		else if (size == 2)
			target_buffer_set_u16(target, buffer, (uint16_t)data);
		else
			target_buffer_set_u32(target, buffer, data);

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
/*
 * Fast path for word-aligned 32-bit reads: switch the DCC to memory-access
 * mode so each read of DTRTX reissues the load and auto-increments the
 * address register, then burst-read count-1 words and fetch the final word
 * after leaving memory mode.  X1/R1 is clobbered and marked dirty.
 */
static int aarch64_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t value;

	/* Mark X1 as dirty */
	armv8_reg_current(arm, 1)->dirty = true;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
	} else {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
	}

	if (retval != ERROR_OK)
		return retval;

	/* Step 1.e - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 1.f - read DBGDTRTX and discard the value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	count--;
	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	if (count) {
		/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
		 * increments X0 by 4. */
		retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
				armv8->debug_base + CPUV8_DBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Step 3.a - set DTR access mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	target_buffer_set_u32(target, buffer + count * 4, value);

	return ERROR_OK;
}
/*
 * Read target memory through the CPU (APB-AP + DCC), dispatching to the
 * fast streaming path for aligned word reads and the slow per-element path
 * otherwise.  Requires a halted core; clobbers X0/R0 (address register),
 * which is marked dirty for restore on resume.  Sticky DSCR abort flags
 * are checked after the transfer.
 */
static int aarch64_read_cpu_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
			address, size, count);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	if (size == 4 && (address % 4) == 0)
		retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);

	/* make sure the DCC is left in Normal mode even on failure */
	if (dscr & DSCR_MA) {
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval != ERROR_OK)
		return retval;

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2135 static int aarch64_read_phys_memory(struct target
*target
,
2136 target_addr_t address
, uint32_t size
,
2137 uint32_t count
, uint8_t *buffer
)
2139 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2141 if (count
&& buffer
) {
2142 /* read memory through APB-AP */
2143 retval
= aarch64_mmu_modify(target
, 0);
2144 if (retval
!= ERROR_OK
)
2146 retval
= aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
/*
 * target_type hook: read virtual memory.  Re-enables the MMU (it may have
 * been disabled by a previous physical access) when it was active at the
 * time the target stopped.
 */
static int aarch64_read_memory(struct target *target, target_addr_t address,
	uint32_t size, uint32_t count, uint8_t *buffer)
{
	int mmu_enabled = 0;
	int retval;

	/* determine if MMU was enabled on target stop */
	retval = aarch64_mmu(target, &mmu_enabled);
	if (retval != ERROR_OK)
		return retval;

	if (mmu_enabled) {
		/* enable MMU as we could have disabled it for phys access */
		retval = aarch64_mmu_modify(target, 1);
		if (retval != ERROR_OK)
			return retval;
	}
	return aarch64_read_cpu_memory(target, address, size, count, buffer);
}
/*
 * target_type hook: write physical memory.  The MMU is switched off for
 * the duration so the CPU-side access interprets the address as physical.
 */
static int aarch64_write_phys_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	int retval = ERROR_COMMAND_SYNTAX_ERROR;

	if (count && buffer) {
		/* write memory through APB-AP */
		retval = aarch64_mmu_modify(target, 0);
		if (retval != ERROR_OK)
			return retval;
		return aarch64_write_cpu_memory(target, address, size, count, buffer);
	}

	return retval;
}
/*
 * target_type hook: write virtual memory.  Re-enables the MMU (it may have
 * been disabled by a previous physical access) when it was active at the
 * time the target stopped.
 */
static int aarch64_write_memory(struct target *target, target_addr_t address,
	uint32_t size, uint32_t count, const uint8_t *buffer)
{
	int mmu_enabled = 0;
	int retval;

	/* determine if MMU was enabled on target stop */
	retval = aarch64_mmu(target, &mmu_enabled);
	if (retval != ERROR_OK)
		return retval;

	if (mmu_enabled) {
		/* enable MMU as we could have disabled it for phys access */
		retval = aarch64_mmu_modify(target, 1);
		if (retval != ERROR_OK)
			return retval;
	}
	return aarch64_write_cpu_memory(target, address, size, count, buffer);
}
/*
 * Periodic timer callback: while the target runs, drain debug messages the
 * firmware pushes through the DCC (DTRTX) and hand each word to the
 * target_request layer.  No-op for unexamined targets or when debug
 * messaging is disabled.
 */
static int aarch64_handle_target_request(void *priv)
{
	struct target *target = priv;
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;

	if (!target_was_examined(target))
		return ERROR_OK;
	if (!target->dbg_msg_enabled)
		return ERROR_OK;

	if (target->state == TARGET_RUNNING) {
		uint32_t request;
		uint32_t dscr;
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

		/* check if we have data */
		while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DTRTX, &request);
			if (retval == ERROR_OK) {
				target_request(target, request);
				/* re-check: more data may be pending */
				retval = mem_ap_read_atomic_u32(armv8->debug_ap,
						armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			}
		}
	}

	return ERROR_OK;
}
/*
 * First-time examination of the target: locate and initialize the APB-AP,
 * find (or take from config) the core's debug register base, unlock the OS
 * lock, read the ID/feature registers, bind the configured CTI, set up the
 * DPM, and size/initialize the Breakpoint Register Pair bookkeeping from
 * ID_AA64DFR0_EL1.  Marks the target examined on success.
 */
static int aarch64_examine_first(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct adiv5_dap *swjdp = armv8->arm.dap;
	struct aarch64_private_config *pc;
	int i;
	int retval = ERROR_OK;
	uint64_t debug, ttypr;
	uint32_t cpuid;
	uint32_t tmp0, tmp1, tmp2, tmp3;
	debug = ttypr = cpuid = 0;

	/* Search for the APB-AB - it is needed for access to debug registers */
	retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not find APB-AP for debug access");
		return retval;
	}

	retval = mem_ap_init(armv8->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not initialize the APB-AP");
		return retval;
	}

	armv8->debug_ap->memaccess_tck = 10;

	if (!target->dbgbase_set) {
		uint32_t dbgbase, apid;
		/* Get ROM Table base */
		int32_t coreidx = target->coreid;
		retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
		if (retval != ERROR_OK)
			return retval;
		/* Lookup 0x15 -- Processor DAP */
		retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
				&armv8->debug_base, &coreidx);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
				" apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
	} else
		armv8->debug_base = target->dbgbase;

	/* clear the OS lock so debug register accesses are allowed */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	retval = mem_ap_read_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "CPUID");
		return retval;
	}

	/* ID_AA64MMFR0_EL1: two 32-bit halves */
	retval = mem_ap_read_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
	retval += mem_ap_read_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "Memory Model Type");
		return retval;
	}
	/* ID_AA64DFR0_EL1: two 32-bit halves */
	retval = mem_ap_read_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
	retval += mem_ap_read_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
		return retval;
	}

	retval = dap_run(armv8->debug_ap->dap);
	if (retval != ERROR_OK) {
		LOG_ERROR("%s: examination failed\n", target_name(target));
		return retval;
	}

	/* assemble the 64-bit feature registers from their halves */
	ttypr |= tmp1;
	ttypr = (ttypr << 32) | tmp0;
	debug |= tmp3;
	debug = (debug << 32) | tmp2;

	LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
	LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
	LOG_DEBUG("debug = 0x%08" PRIx64, debug);

	if (target->private_config == NULL)
		return ERROR_FAIL;

	pc = (struct aarch64_private_config *)target->private_config;
	if (pc->cti == NULL)
		return ERROR_FAIL;

	armv8->cti = pc->cti;

	retval = aarch64_dpm_setup(aarch64, debug);
	if (retval != ERROR_OK)
		return retval;

	/* Setup Breakpoint Register Pairs */
	aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
	aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
	aarch64->brp_num_available = aarch64->brp_num;
	aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
	for (i = 0; i < aarch64->brp_num; i++) {
		aarch64->brp_list[i].used = 0;
		/* low-numbered BRPs are plain address match, the rest context */
		if (i < (aarch64->brp_num-aarch64->brp_num_context))
			aarch64->brp_list[i].type = BRP_NORMAL;
		else
			aarch64->brp_list[i].type = BRP_CONTEXT;
		aarch64->brp_list[i].value = 0;
		aarch64->brp_list[i].control = 0;
		aarch64->brp_list[i].BRPn = i;
	}

	LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);

	target->state = TARGET_UNKNOWN;
	target->debug_reason = DBG_REASON_NOTHALTED;
	aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
	target_set_examined(target);
	return ERROR_OK;
}
2370 static int aarch64_examine(struct target
*target
)
2372 int retval
= ERROR_OK
;
2374 /* don't re-probe hardware after each reset */
2375 if (!target_was_examined(target
))
2376 retval
= aarch64_examine_first(target
);
2378 /* Configure core debug access */
2379 if (retval
== ERROR_OK
)
2380 retval
= aarch64_init_debug_access(target
);
2386 * Cortex-A8 target creation and initialization
/*
 * target_type init hook: most setup happens in examine_first(); here we
 * only initialize semihosting support.
 */
static int aarch64_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	arm_semihosting_init(target);
	return ERROR_OK;
}
/*
 * Initialize the aarch64_common / armv8_common structures for a new target:
 * set the magic, attach the DAP, register the arch-specific callbacks, and
 * start the periodic debug-message poller.
 */
static int aarch64_init_arch_info(struct target *target,
	struct aarch64_common *aarch64, struct adiv5_dap *dap)
{
	struct armv8_common *armv8 = &aarch64->armv8_common;

	/* Setup struct aarch64_common */
	aarch64->common_magic = AARCH64_COMMON_MAGIC;
	armv8->arm.dap = dap;

	/* register arch-specific functions */
	armv8->examine_debug_reason = NULL;
	armv8->post_debug_entry = aarch64_post_debug_entry;
	armv8->pre_restore_context = NULL;
	armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;

	armv8_init_arch_info(target, armv8);
	/* poll for DCC debug messages every ms while running */
	target_register_timer_callback(aarch64_handle_target_request, 1,
		TARGET_TIMER_TYPE_PERIODIC, target);

	return ERROR_OK;
}
/*
 * target_type create hook: validate the DAP part of the private config
 * (parsed earlier by aarch64_jim_configure) and allocate/initialize the
 * per-target aarch64_common state.
 */
static int aarch64_target_create(struct target *target, Jim_Interp *interp)
{
	struct aarch64_private_config *pc = target->private_config;
	struct aarch64_common *aarch64;

	if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
		return ERROR_FAIL;

	aarch64 = calloc(1, sizeof(struct aarch64_common));
	if (aarch64 == NULL) {
		LOG_ERROR("Out of memory");
		return ERROR_FAIL;
	}

	/* ownership of 'aarch64' passes to the target framework */
	return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
}
2436 static void aarch64_deinit_target(struct target
*target
)
2438 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2439 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2440 struct arm_dpm
*dpm
= &armv8
->dpm
;
2442 armv8_free_reg_cache(target
);
2443 free(aarch64
->brp_list
);
2446 free(target
->private_config
);
2450 static int aarch64_mmu(struct target
*target
, int *enabled
)
2452 if (target
->state
!= TARGET_HALTED
) {
2453 LOG_ERROR("%s: target %s not halted", __func__
, target_name(target
));
2454 return ERROR_TARGET_INVALID
;
2457 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
2461 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
2462 target_addr_t
*phys
)
2464 return armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
2468 * private target configuration items
2470 enum aarch64_cfg_param
{
2474 static const Jim_Nvp nvp_config_opts
[] = {
2475 { .name
= "-cti", .value
= CFG_CTI
},
2476 { .name
= NULL
, .value
= -1 }
2479 static int aarch64_jim_configure(struct target
*target
, Jim_GetOptInfo
*goi
)
2481 struct aarch64_private_config
*pc
;
2485 pc
= (struct aarch64_private_config
*)target
->private_config
;
2487 pc
= calloc(1, sizeof(struct aarch64_private_config
));
2488 target
->private_config
= pc
;
2492 * Call adiv5_jim_configure() to parse the common DAP options
2493 * It will return JIM_CONTINUE if it didn't find any known
2494 * options, JIM_OK if it correctly parsed the topmost option
2495 * and JIM_ERR if an error occured during parameter evaluation.
2496 * For JIM_CONTINUE, we check our own params.
2498 e
= adiv5_jim_configure(target
, goi
);
2499 if (e
!= JIM_CONTINUE
)
2502 /* parse config or cget options ... */
2503 if (goi
->argc
> 0) {
2504 Jim_SetEmptyResult(goi
->interp
);
2506 /* check first if topmost item is for us */
2507 e
= Jim_Nvp_name2value_obj(goi
->interp
, nvp_config_opts
,
2510 return JIM_CONTINUE
;
2512 e
= Jim_GetOpt_Obj(goi
, NULL
);
2518 if (goi
->isconfigure
) {
2520 struct arm_cti
*cti
;
2521 e
= Jim_GetOpt_Obj(goi
, &o_cti
);
2524 cti
= cti_instance_by_jim_obj(goi
->interp
, o_cti
);
2526 Jim_SetResultString(goi
->interp
, "CTI name invalid!", -1);
2531 if (goi
->argc
!= 0) {
2532 Jim_WrongNumArgs(goi
->interp
,
2533 goi
->argc
, goi
->argv
,
2538 if (pc
== NULL
|| pc
->cti
== NULL
) {
2539 Jim_SetResultString(goi
->interp
, "CTI not configured", -1);
2542 Jim_SetResultString(goi
->interp
, arm_cti_name(pc
->cti
), -1);
2548 return JIM_CONTINUE
;
2555 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
2557 struct target
*target
= get_current_target(CMD_CTX
);
2558 struct armv8_common
*armv8
= target_to_armv8(target
);
2560 return armv8_handle_cache_info_command(CMD
,
2561 &armv8
->armv8_mmu
.armv8_cache
);
2565 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
2567 struct target
*target
= get_current_target(CMD_CTX
);
2568 if (!target_was_examined(target
)) {
2569 LOG_ERROR("target not examined yet");
2573 return aarch64_init_debug_access(target
);
2576 COMMAND_HANDLER(aarch64_mask_interrupts_command
)
2578 struct target
*target
= get_current_target(CMD_CTX
);
2579 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2581 static const Jim_Nvp nvp_maskisr_modes
[] = {
2582 { .name
= "off", .value
= AARCH64_ISRMASK_OFF
},
2583 { .name
= "on", .value
= AARCH64_ISRMASK_ON
},
2584 { .name
= NULL
, .value
= -1 },
2589 n
= Jim_Nvp_name2value_simple(nvp_maskisr_modes
, CMD_ARGV
[0]);
2590 if (n
->name
== NULL
) {
2591 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV
[0]);
2592 return ERROR_COMMAND_SYNTAX_ERROR
;
2595 aarch64
->isrmasking_mode
= n
->value
;
2598 n
= Jim_Nvp_value2name_simple(nvp_maskisr_modes
, aarch64
->isrmasking_mode
);
2599 command_print(CMD
, "aarch64 interrupt mask %s", n
->name
);
2604 static int jim_mcrmrc(Jim_Interp
*interp
, int argc
, Jim_Obj
* const *argv
)
2606 struct command_context
*context
;
2607 struct target
*target
;
2610 bool is_mcr
= false;
2613 if (Jim_CompareStringImmediate(interp
, argv
[0], "mcr")) {
2620 context
= current_command_context(interp
);
2621 assert(context
!= NULL
);
2623 target
= get_current_target(context
);
2624 if (target
== NULL
) {
2625 LOG_ERROR("%s: no current target", __func__
);
2628 if (!target_was_examined(target
)) {
2629 LOG_ERROR("%s: not yet examined", target_name(target
));
2633 arm
= target_to_arm(target
);
2635 LOG_ERROR("%s: not an ARM", target_name(target
));
2639 if (target
->state
!= TARGET_HALTED
)
2640 return ERROR_TARGET_NOT_HALTED
;
2642 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2643 LOG_ERROR("%s: not 32-bit arm target", target_name(target
));
2647 if (argc
!= arg_cnt
) {
2648 LOG_ERROR("%s: wrong number of arguments", __func__
);
2660 /* NOTE: parameter sequence matches ARM instruction set usage:
2661 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
2662 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
2663 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
2665 retval
= Jim_GetLong(interp
, argv
[1], &l
);
2666 if (retval
!= JIM_OK
)
2669 LOG_ERROR("%s: %s %d out of range", __func__
,
2670 "coprocessor", (int) l
);
2675 retval
= Jim_GetLong(interp
, argv
[2], &l
);
2676 if (retval
!= JIM_OK
)
2679 LOG_ERROR("%s: %s %d out of range", __func__
,
2685 retval
= Jim_GetLong(interp
, argv
[3], &l
);
2686 if (retval
!= JIM_OK
)
2689 LOG_ERROR("%s: %s %d out of range", __func__
,
2695 retval
= Jim_GetLong(interp
, argv
[4], &l
);
2696 if (retval
!= JIM_OK
)
2699 LOG_ERROR("%s: %s %d out of range", __func__
,
2705 retval
= Jim_GetLong(interp
, argv
[5], &l
);
2706 if (retval
!= JIM_OK
)
2709 LOG_ERROR("%s: %s %d out of range", __func__
,
2717 if (is_mcr
== true) {
2718 retval
= Jim_GetLong(interp
, argv
[6], &l
);
2719 if (retval
!= JIM_OK
)
2723 /* NOTE: parameters reordered! */
2724 /* ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2) */
2725 retval
= arm
->mcr(target
, cpnum
, op1
, op2
, CRn
, CRm
, value
);
2726 if (retval
!= ERROR_OK
)
2729 /* NOTE: parameters reordered! */
2730 /* ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2) */
2731 retval
= arm
->mrc(target
, cpnum
, op1
, op2
, CRn
, CRm
, &value
);
2732 if (retval
!= ERROR_OK
)
2735 Jim_SetResult(interp
, Jim_NewIntObj(interp
, value
));
2741 static const struct command_registration aarch64_exec_command_handlers
[] = {
2743 .name
= "cache_info",
2744 .handler
= aarch64_handle_cache_info_command
,
2745 .mode
= COMMAND_EXEC
,
2746 .help
= "display information about target caches",
2751 .handler
= aarch64_handle_dbginit_command
,
2752 .mode
= COMMAND_EXEC
,
2753 .help
= "Initialize core debug",
2758 .handler
= aarch64_mask_interrupts_command
,
2759 .mode
= COMMAND_ANY
,
2760 .help
= "mask aarch64 interrupts during single-step",
2761 .usage
= "['on'|'off']",
2765 .mode
= COMMAND_EXEC
,
2766 .jim_handler
= jim_mcrmrc
,
2767 .help
= "write coprocessor register",
2768 .usage
= "cpnum op1 CRn CRm op2 value",
2772 .mode
= COMMAND_EXEC
,
2773 .jim_handler
= jim_mcrmrc
,
2774 .help
= "read coprocessor register",
2775 .usage
= "cpnum op1 CRn CRm op2",
2778 .chain
= smp_command_handlers
,
2782 COMMAND_REGISTRATION_DONE
2785 static const struct command_registration aarch64_command_handlers
[] = {
2787 .chain
= armv8_command_handlers
,
2791 .mode
= COMMAND_ANY
,
2792 .help
= "Aarch64 command group",
2794 .chain
= aarch64_exec_command_handlers
,
2796 COMMAND_REGISTRATION_DONE
2799 struct target_type aarch64_target
= {
2802 .poll
= aarch64_poll
,
2803 .arch_state
= armv8_arch_state
,
2805 .halt
= aarch64_halt
,
2806 .resume
= aarch64_resume
,
2807 .step
= aarch64_step
,
2809 .assert_reset
= aarch64_assert_reset
,
2810 .deassert_reset
= aarch64_deassert_reset
,
2812 /* REVISIT allow exporting VFP3 registers ... */
2813 .get_gdb_arch
= armv8_get_gdb_arch
,
2814 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
2816 .read_memory
= aarch64_read_memory
,
2817 .write_memory
= aarch64_write_memory
,
2819 .add_breakpoint
= aarch64_add_breakpoint
,
2820 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
2821 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
2822 .remove_breakpoint
= aarch64_remove_breakpoint
,
2823 .add_watchpoint
= NULL
,
2824 .remove_watchpoint
= NULL
,
2826 .commands
= aarch64_command_handlers
,
2827 .target_create
= aarch64_target_create
,
2828 .target_jim_configure
= aarch64_jim_configure
,
2829 .init_target
= aarch64_init_target
,
2830 .deinit_target
= aarch64_deinit_target
,
2831 .examine
= aarch64_examine
,
2833 .read_phys_memory
= aarch64_read_phys_memory
,
2834 .write_phys_memory
= aarch64_write_phys_memory
,
2836 .virt2phys
= aarch64_virt2phys
,
Linking to existing account procedure
If you already have an account and want to add another login method, you MUST first sign in with your existing account, and then change the URL to https://review.openocd.org/login/?link to get to this page again; this time it will work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)