1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include "arm_semihosting.h"
32 #include <helper/time_support.h>
44 struct aarch64_private_config
{
45 struct adiv5_private_config adiv5_config
;
49 static int aarch64_poll(struct target
*target
);
50 static int aarch64_debug_entry(struct target
*target
);
51 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
52 static int aarch64_set_breakpoint(struct target
*target
,
53 struct breakpoint
*breakpoint
, uint8_t matchmode
);
54 static int aarch64_set_context_breakpoint(struct target
*target
,
55 struct breakpoint
*breakpoint
, uint8_t matchmode
);
56 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
57 struct breakpoint
*breakpoint
);
58 static int aarch64_unset_breakpoint(struct target
*target
,
59 struct breakpoint
*breakpoint
);
60 static int aarch64_mmu(struct target
*target
, int *enabled
);
61 static int aarch64_virt2phys(struct target
*target
,
62 target_addr_t virt
, target_addr_t
*phys
);
63 static int aarch64_read_cpu_memory(struct target
*target
,
64 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
/* Iterate 'pos' over every entry of an SMP target group list 'head'.
 * 'pos' must be a 'struct target_list *' lvalue; the loop body may not
 * remove entries. */
#define foreach_smp_target(pos, head) \
	for (pos = head; (pos != NULL); pos = pos->next)
69 static int aarch64_restore_system_control_reg(struct target
*target
)
71 enum arm_mode target_mode
= ARM_MODE_ANY
;
72 int retval
= ERROR_OK
;
75 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
76 struct armv8_common
*armv8
= target_to_armv8(target
);
78 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
79 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
80 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
82 switch (armv8
->arm
.core_mode
) {
84 target_mode
= ARMV8_64_EL1H
;
88 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
92 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
96 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
103 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
107 LOG_INFO("cannot read system control register in this mode");
111 if (target_mode
!= ARM_MODE_ANY
)
112 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
114 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
, aarch64
->system_control_reg
);
115 if (retval
!= ERROR_OK
)
118 if (target_mode
!= ARM_MODE_ANY
)
119 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
125 /* modify system_control_reg in order to enable or disable mmu for :
126 * - virt2phys address conversion
127 * - read or write memory in phys or virt address */
128 static int aarch64_mmu_modify(struct target
*target
, int enable
)
130 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
131 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
132 int retval
= ERROR_OK
;
136 /* if mmu enabled at target stop and mmu not enable */
137 if (!(aarch64
->system_control_reg
& 0x1U
)) {
138 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
141 if (!(aarch64
->system_control_reg_curr
& 0x1U
))
142 aarch64
->system_control_reg_curr
|= 0x1U
;
144 if (aarch64
->system_control_reg_curr
& 0x4U
) {
145 /* data cache is active */
146 aarch64
->system_control_reg_curr
&= ~0x4U
;
147 /* flush data cache armv8 function to be called */
148 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
149 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
151 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
152 aarch64
->system_control_reg_curr
&= ~0x1U
;
156 switch (armv8
->arm
.core_mode
) {
160 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
164 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
168 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
175 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
179 LOG_DEBUG("unknown cpu state 0x%" PRIx32
, armv8
->arm
.core_mode
);
183 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
,
184 aarch64
->system_control_reg_curr
);
189 * Basic debug access, very low level assumes state is saved
191 static int aarch64_init_debug_access(struct target
*target
)
193 struct armv8_common
*armv8
= target_to_armv8(target
);
197 LOG_DEBUG("%s", target_name(target
));
199 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
200 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
201 if (retval
!= ERROR_OK
) {
202 LOG_DEBUG("Examine %s failed", "oslock");
206 /* Clear Sticky Power Down status Bit in PRSR to enable access to
207 the registers in the Core Power Domain */
208 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
209 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
210 if (retval
!= ERROR_OK
)
214 * Static CTI configuration:
215 * Channel 0 -> trigger outputs HALT request to PE
216 * Channel 1 -> trigger outputs Resume request to PE
217 * Gate all channel trigger events from entering the CTM
221 retval
= arm_cti_enable(armv8
->cti
, true);
222 /* By default, gate all channel events to and from the CTM */
223 if (retval
== ERROR_OK
)
224 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
225 /* output halt requests to PE on channel 0 event */
226 if (retval
== ERROR_OK
)
227 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN0
, CTI_CHNL(0));
228 /* output restart requests to PE on channel 1 event */
229 if (retval
== ERROR_OK
)
230 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN1
, CTI_CHNL(1));
231 if (retval
!= ERROR_OK
)
234 /* Resync breakpoint registers */
239 /* Write to memory mapped registers directly with no cache or mmu handling */
240 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
245 struct armv8_common
*armv8
= target_to_armv8(target
);
247 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
252 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint64_t debug
)
254 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
257 dpm
->arm
= &a8
->armv8_common
.arm
;
260 retval
= armv8_dpm_setup(dpm
);
261 if (retval
== ERROR_OK
)
262 retval
= armv8_dpm_initialize(dpm
);
267 static int aarch64_set_dscr_bits(struct target
*target
, unsigned long bit_mask
, unsigned long value
)
269 struct armv8_common
*armv8
= target_to_armv8(target
);
270 return armv8_set_dbgreg_bits(armv8
, CPUV8_DBG_DSCR
, bit_mask
, value
);
273 static int aarch64_check_state_one(struct target
*target
,
274 uint32_t mask
, uint32_t val
, int *p_result
, uint32_t *p_prsr
)
276 struct armv8_common
*armv8
= target_to_armv8(target
);
280 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
281 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &prsr
);
282 if (retval
!= ERROR_OK
)
289 *p_result
= (prsr
& mask
) == (val
& mask
);
294 static int aarch64_wait_halt_one(struct target
*target
)
296 int retval
= ERROR_OK
;
299 int64_t then
= timeval_ms();
303 retval
= aarch64_check_state_one(target
, PRSR_HALT
, PRSR_HALT
, &halted
, &prsr
);
304 if (retval
!= ERROR_OK
|| halted
)
307 if (timeval_ms() > then
+ 1000) {
308 retval
= ERROR_TARGET_TIMEOUT
;
309 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32
, target_name(target
), prsr
);
316 static int aarch64_prepare_halt_smp(struct target
*target
, bool exc_target
, struct target
**p_first
)
318 int retval
= ERROR_OK
;
319 struct target_list
*head
= target
->head
;
320 struct target
*first
= NULL
;
322 LOG_DEBUG("target %s exc %i", target_name(target
), exc_target
);
324 while (head
!= NULL
) {
325 struct target
*curr
= head
->target
;
326 struct armv8_common
*armv8
= target_to_armv8(curr
);
329 if (exc_target
&& curr
== target
)
331 if (!target_was_examined(curr
))
333 if (curr
->state
!= TARGET_RUNNING
)
336 /* HACK: mark this target as prepared for halting */
337 curr
->debug_reason
= DBG_REASON_DBGRQ
;
339 /* open the gate for channel 0 to let HALT requests pass to the CTM */
340 retval
= arm_cti_ungate_channel(armv8
->cti
, 0);
341 if (retval
== ERROR_OK
)
342 retval
= aarch64_set_dscr_bits(curr
, DSCR_HDE
, DSCR_HDE
);
343 if (retval
!= ERROR_OK
)
346 LOG_DEBUG("target %s prepared", target_name(curr
));
353 if (exc_target
&& first
)
362 static int aarch64_halt_one(struct target
*target
, enum halt_mode mode
)
364 int retval
= ERROR_OK
;
365 struct armv8_common
*armv8
= target_to_armv8(target
);
367 LOG_DEBUG("%s", target_name(target
));
369 /* allow Halting Debug Mode */
370 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
371 if (retval
!= ERROR_OK
)
374 /* trigger an event on channel 0, this outputs a halt request to the PE */
375 retval
= arm_cti_pulse_channel(armv8
->cti
, 0);
376 if (retval
!= ERROR_OK
)
379 if (mode
== HALT_SYNC
) {
380 retval
= aarch64_wait_halt_one(target
);
381 if (retval
!= ERROR_OK
) {
382 if (retval
== ERROR_TARGET_TIMEOUT
)
383 LOG_ERROR("Timeout waiting for target %s halt", target_name(target
));
391 static int aarch64_halt_smp(struct target
*target
, bool exc_target
)
393 struct target
*next
= target
;
396 /* prepare halt on all PEs of the group */
397 retval
= aarch64_prepare_halt_smp(target
, exc_target
, &next
);
399 if (exc_target
&& next
== target
)
402 /* halt the target PE */
403 if (retval
== ERROR_OK
)
404 retval
= aarch64_halt_one(next
, HALT_LAZY
);
406 if (retval
!= ERROR_OK
)
409 /* wait for all PEs to halt */
410 int64_t then
= timeval_ms();
412 bool all_halted
= true;
413 struct target_list
*head
;
416 foreach_smp_target(head
, target
->head
) {
421 if (!target_was_examined(curr
))
424 retval
= aarch64_check_state_one(curr
, PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
425 if (retval
!= ERROR_OK
|| !halted
) {
434 if (timeval_ms() > then
+ 1000) {
435 retval
= ERROR_TARGET_TIMEOUT
;
440 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
441 * and it looks like the CTI's are not connected by a common
442 * trigger matrix. It seems that we need to halt one core in each
443 * cluster explicitly. So if we find that a core has not halted
444 * yet, we trigger an explicit halt for the second cluster.
446 retval
= aarch64_halt_one(curr
, HALT_LAZY
);
447 if (retval
!= ERROR_OK
)
454 static int update_halt_gdb(struct target
*target
, enum target_debug_reason debug_reason
)
456 struct target
*gdb_target
= NULL
;
457 struct target_list
*head
;
460 if (debug_reason
== DBG_REASON_NOTHALTED
) {
461 LOG_DEBUG("Halting remaining targets in SMP group");
462 aarch64_halt_smp(target
, true);
465 /* poll all targets in the group, but skip the target that serves GDB */
466 foreach_smp_target(head
, target
->head
) {
468 /* skip calling context */
471 if (!target_was_examined(curr
))
473 /* skip targets that were already halted */
474 if (curr
->state
== TARGET_HALTED
)
476 /* remember the gdb_service->target */
477 if (curr
->gdb_service
!= NULL
)
478 gdb_target
= curr
->gdb_service
->target
;
480 if (curr
== gdb_target
)
483 /* avoid recursion in aarch64_poll() */
489 /* after all targets were updated, poll the gdb serving target */
490 if (gdb_target
!= NULL
&& gdb_target
!= target
)
491 aarch64_poll(gdb_target
);
497 * Aarch64 Run control
500 static int aarch64_poll(struct target
*target
)
502 enum target_state prev_target_state
;
503 int retval
= ERROR_OK
;
506 retval
= aarch64_check_state_one(target
,
507 PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
508 if (retval
!= ERROR_OK
)
512 prev_target_state
= target
->state
;
513 if (prev_target_state
!= TARGET_HALTED
) {
514 enum target_debug_reason debug_reason
= target
->debug_reason
;
516 /* We have a halting debug event */
517 target
->state
= TARGET_HALTED
;
518 LOG_DEBUG("Target %s halted", target_name(target
));
519 retval
= aarch64_debug_entry(target
);
520 if (retval
!= ERROR_OK
)
524 update_halt_gdb(target
, debug_reason
);
526 if (arm_semihosting(target
, &retval
) != 0)
529 switch (prev_target_state
) {
533 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
535 case TARGET_DEBUG_RUNNING
:
536 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_HALTED
);
543 target
->state
= TARGET_RUNNING
;
548 static int aarch64_halt(struct target
*target
)
550 struct armv8_common
*armv8
= target_to_armv8(target
);
551 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_HALT
;
554 return aarch64_halt_smp(target
, false);
556 return aarch64_halt_one(target
, HALT_SYNC
);
559 static int aarch64_restore_one(struct target
*target
, int current
,
560 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
562 struct armv8_common
*armv8
= target_to_armv8(target
);
563 struct arm
*arm
= &armv8
->arm
;
567 LOG_DEBUG("%s", target_name(target
));
569 if (!debug_execution
)
570 target_free_all_working_areas(target
);
572 /* current = 1: continue on current pc, otherwise continue at <address> */
573 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
575 resume_pc
= *address
;
577 *address
= resume_pc
;
579 /* Make sure that the Armv7 gdb thumb fixups does not
580 * kill the return address
582 switch (arm
->core_state
) {
584 resume_pc
&= 0xFFFFFFFC;
586 case ARM_STATE_AARCH64
:
587 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
589 case ARM_STATE_THUMB
:
590 case ARM_STATE_THUMB_EE
:
591 /* When the return address is loaded into PC
592 * bit 0 must be 1 to stay in Thumb state
596 case ARM_STATE_JAZELLE
:
597 LOG_ERROR("How do I resume into Jazelle state??");
600 LOG_DEBUG("resume pc = 0x%016" PRIx64
, resume_pc
);
601 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
605 /* called it now before restoring context because it uses cpu
606 * register r0 for restoring system control register */
607 retval
= aarch64_restore_system_control_reg(target
);
608 if (retval
== ERROR_OK
)
609 retval
= aarch64_restore_context(target
, handle_breakpoints
);
615 * prepare single target for restart
619 static int aarch64_prepare_restart_one(struct target
*target
)
621 struct armv8_common
*armv8
= target_to_armv8(target
);
626 LOG_DEBUG("%s", target_name(target
));
628 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
629 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
630 if (retval
!= ERROR_OK
)
633 if ((dscr
& DSCR_ITE
) == 0)
634 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
635 if ((dscr
& DSCR_ERR
) != 0)
636 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
638 /* acknowledge a pending CTI halt event */
639 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
641 * open the CTI gate for channel 1 so that the restart events
642 * get passed along to all PEs. Also close gate for channel 0
643 * to isolate the PE from halt events.
645 if (retval
== ERROR_OK
)
646 retval
= arm_cti_ungate_channel(armv8
->cti
, 1);
647 if (retval
== ERROR_OK
)
648 retval
= arm_cti_gate_channel(armv8
->cti
, 0);
650 /* make sure that DSCR.HDE is set */
651 if (retval
== ERROR_OK
) {
653 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
654 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
657 if (retval
== ERROR_OK
) {
658 /* clear sticky bits in PRSR, SDR is now 0 */
659 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
660 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &tmp
);
666 static int aarch64_do_restart_one(struct target
*target
, enum restart_mode mode
)
668 struct armv8_common
*armv8
= target_to_armv8(target
);
671 LOG_DEBUG("%s", target_name(target
));
673 /* trigger an event on channel 1, generates a restart request to the PE */
674 retval
= arm_cti_pulse_channel(armv8
->cti
, 1);
675 if (retval
!= ERROR_OK
)
678 if (mode
== RESTART_SYNC
) {
679 int64_t then
= timeval_ms();
683 * if PRSR.SDR is set now, the target did restart, even
684 * if it's now already halted again (e.g. due to breakpoint)
686 retval
= aarch64_check_state_one(target
,
687 PRSR_SDR
, PRSR_SDR
, &resumed
, NULL
);
688 if (retval
!= ERROR_OK
|| resumed
)
691 if (timeval_ms() > then
+ 1000) {
692 LOG_ERROR("%s: Timeout waiting for resume"PRIx32
, target_name(target
));
693 retval
= ERROR_TARGET_TIMEOUT
;
699 if (retval
!= ERROR_OK
)
702 target
->debug_reason
= DBG_REASON_NOTHALTED
;
703 target
->state
= TARGET_RUNNING
;
708 static int aarch64_restart_one(struct target
*target
, enum restart_mode mode
)
712 LOG_DEBUG("%s", target_name(target
));
714 retval
= aarch64_prepare_restart_one(target
);
715 if (retval
== ERROR_OK
)
716 retval
= aarch64_do_restart_one(target
, mode
);
722 * prepare all but the current target for restart
724 static int aarch64_prep_restart_smp(struct target
*target
, int handle_breakpoints
, struct target
**p_first
)
726 int retval
= ERROR_OK
;
727 struct target_list
*head
;
728 struct target
*first
= NULL
;
731 foreach_smp_target(head
, target
->head
) {
732 struct target
*curr
= head
->target
;
734 /* skip calling target */
737 if (!target_was_examined(curr
))
739 if (curr
->state
!= TARGET_HALTED
)
742 /* resume at current address, not in step mode */
743 retval
= aarch64_restore_one(curr
, 1, &address
, handle_breakpoints
, 0);
744 if (retval
== ERROR_OK
)
745 retval
= aarch64_prepare_restart_one(curr
);
746 if (retval
!= ERROR_OK
) {
747 LOG_ERROR("failed to restore target %s", target_name(curr
));
750 /* remember the first valid target in the group */
762 static int aarch64_step_restart_smp(struct target
*target
)
764 int retval
= ERROR_OK
;
765 struct target_list
*head
;
766 struct target
*first
= NULL
;
768 LOG_DEBUG("%s", target_name(target
));
770 retval
= aarch64_prep_restart_smp(target
, 0, &first
);
771 if (retval
!= ERROR_OK
)
775 retval
= aarch64_do_restart_one(first
, RESTART_LAZY
);
776 if (retval
!= ERROR_OK
) {
777 LOG_DEBUG("error restarting target %s", target_name(first
));
781 int64_t then
= timeval_ms();
783 struct target
*curr
= target
;
784 bool all_resumed
= true;
786 foreach_smp_target(head
, target
->head
) {
795 if (!target_was_examined(curr
))
798 retval
= aarch64_check_state_one(curr
,
799 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
800 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
805 if (curr
->state
!= TARGET_RUNNING
) {
806 curr
->state
= TARGET_RUNNING
;
807 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
808 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
815 if (timeval_ms() > then
+ 1000) {
816 LOG_ERROR("%s: timeout waiting for target resume", __func__
);
817 retval
= ERROR_TARGET_TIMEOUT
;
821 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
822 * and it looks like the CTI's are not connected by a common
823 * trigger matrix. It seems that we need to halt one core in each
824 * cluster explicitly. So if we find that a core has not halted
825 * yet, we trigger an explicit resume for the second cluster.
827 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
828 if (retval
!= ERROR_OK
)
835 static int aarch64_resume(struct target
*target
, int current
,
836 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
839 uint64_t addr
= address
;
841 struct armv8_common
*armv8
= target_to_armv8(target
);
842 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_RESUME
;
844 if (target
->state
!= TARGET_HALTED
)
845 return ERROR_TARGET_NOT_HALTED
;
848 * If this target is part of a SMP group, prepare the others
849 * targets for resuming. This involves restoring the complete
850 * target register context and setting up CTI gates to accept
851 * resume events from the trigger matrix.
854 retval
= aarch64_prep_restart_smp(target
, handle_breakpoints
, NULL
);
855 if (retval
!= ERROR_OK
)
859 /* all targets prepared, restore and restart the current target */
860 retval
= aarch64_restore_one(target
, current
, &addr
, handle_breakpoints
,
862 if (retval
== ERROR_OK
)
863 retval
= aarch64_restart_one(target
, RESTART_SYNC
);
864 if (retval
!= ERROR_OK
)
868 int64_t then
= timeval_ms();
870 struct target
*curr
= target
;
871 struct target_list
*head
;
872 bool all_resumed
= true;
874 foreach_smp_target(head
, target
->head
) {
881 if (!target_was_examined(curr
))
884 retval
= aarch64_check_state_one(curr
,
885 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
886 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
891 if (curr
->state
!= TARGET_RUNNING
) {
892 curr
->state
= TARGET_RUNNING
;
893 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
894 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
901 if (timeval_ms() > then
+ 1000) {
902 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__
, target_name(curr
));
903 retval
= ERROR_TARGET_TIMEOUT
;
908 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
909 * and it looks like the CTI's are not connected by a common
910 * trigger matrix. It seems that we need to halt one core in each
911 * cluster explicitly. So if we find that a core has not halted
912 * yet, we trigger an explicit resume for the second cluster.
914 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
915 if (retval
!= ERROR_OK
)
920 if (retval
!= ERROR_OK
)
923 target
->debug_reason
= DBG_REASON_NOTHALTED
;
925 if (!debug_execution
) {
926 target
->state
= TARGET_RUNNING
;
927 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
928 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
930 target
->state
= TARGET_DEBUG_RUNNING
;
931 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
932 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
938 static int aarch64_debug_entry(struct target
*target
)
940 int retval
= ERROR_OK
;
941 struct armv8_common
*armv8
= target_to_armv8(target
);
942 struct arm_dpm
*dpm
= &armv8
->dpm
;
943 enum arm_state core_state
;
946 /* make sure to clear all sticky errors */
947 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
948 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
949 if (retval
== ERROR_OK
)
950 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
951 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
952 if (retval
== ERROR_OK
)
953 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
955 if (retval
!= ERROR_OK
)
958 LOG_DEBUG("%s dscr = 0x%08" PRIx32
, target_name(target
), dscr
);
961 core_state
= armv8_dpm_get_core_state(dpm
);
962 armv8_select_opcodes(armv8
, core_state
== ARM_STATE_AARCH64
);
963 armv8_select_reg_access(armv8
, core_state
== ARM_STATE_AARCH64
);
965 /* close the CTI gate for all events */
966 if (retval
== ERROR_OK
)
967 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
968 /* discard async exceptions */
969 if (retval
== ERROR_OK
)
970 retval
= dpm
->instr_cpsr_sync(dpm
);
971 if (retval
!= ERROR_OK
)
974 /* Examine debug reason */
975 armv8_dpm_report_dscr(dpm
, dscr
);
977 /* save address of instruction that triggered the watchpoint? */
978 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
982 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
983 armv8
->debug_base
+ CPUV8_DBG_WFAR1
,
985 if (retval
!= ERROR_OK
)
989 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
990 armv8
->debug_base
+ CPUV8_DBG_WFAR0
,
992 if (retval
!= ERROR_OK
)
995 armv8_dpm_report_wfar(&armv8
->dpm
, wfar
);
998 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
1000 if (retval
== ERROR_OK
&& armv8
->post_debug_entry
)
1001 retval
= armv8
->post_debug_entry(target
);
1006 static int aarch64_post_debug_entry(struct target
*target
)
1008 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1009 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1011 enum arm_mode target_mode
= ARM_MODE_ANY
;
1014 switch (armv8
->arm
.core_mode
) {
1016 target_mode
= ARMV8_64_EL1H
;
1020 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL1
, 0);
1024 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL2
, 0);
1028 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL3
, 0);
1035 instr
= ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1039 LOG_INFO("cannot read system control register in this mode");
1043 if (target_mode
!= ARM_MODE_ANY
)
1044 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
1046 retval
= armv8
->dpm
.instr_read_data_r0(&armv8
->dpm
, instr
, &aarch64
->system_control_reg
);
1047 if (retval
!= ERROR_OK
)
1050 if (target_mode
!= ARM_MODE_ANY
)
1051 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
1053 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
1054 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
1056 if (armv8
->armv8_mmu
.armv8_cache
.info
== -1) {
1057 armv8_identify_cache(armv8
);
1058 armv8_read_mpidr(armv8
);
1061 armv8
->armv8_mmu
.mmu_enabled
=
1062 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
1063 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
1064 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
1065 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
1066 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
1071 * single-step a target
1073 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
1074 int handle_breakpoints
)
1076 struct armv8_common
*armv8
= target_to_armv8(target
);
1077 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1078 int saved_retval
= ERROR_OK
;
1082 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_STEP
;
1084 if (target
->state
!= TARGET_HALTED
) {
1085 LOG_WARNING("target not halted");
1086 return ERROR_TARGET_NOT_HALTED
;
1089 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1090 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
1091 /* make sure EDECR.SS is not set when restoring the register */
1093 if (retval
== ERROR_OK
) {
1095 /* set EDECR.SS to enter hardware step mode */
1096 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1097 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (edecr
|0x4));
1099 /* disable interrupts while stepping */
1100 if (retval
== ERROR_OK
&& aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
)
1101 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0x3 << 22);
1102 /* bail out if stepping setup has failed */
1103 if (retval
!= ERROR_OK
)
1106 if (target
->smp
&& (current
== 1)) {
1108 * isolate current target so that it doesn't get resumed
1109 * together with the others
1111 retval
= arm_cti_gate_channel(armv8
->cti
, 1);
1112 /* resume all other targets in the group */
1113 if (retval
== ERROR_OK
)
1114 retval
= aarch64_step_restart_smp(target
);
1115 if (retval
!= ERROR_OK
) {
1116 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1119 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1122 /* all other targets running, restore and restart the current target */
1123 retval
= aarch64_restore_one(target
, current
, &address
, 0, 0);
1124 if (retval
== ERROR_OK
)
1125 retval
= aarch64_restart_one(target
, RESTART_LAZY
);
1127 if (retval
!= ERROR_OK
)
1130 LOG_DEBUG("target step-resumed at 0x%" PRIx64
, address
);
1131 if (!handle_breakpoints
)
1132 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1134 int64_t then
= timeval_ms();
1139 retval
= aarch64_check_state_one(target
,
1140 PRSR_SDR
|PRSR_HALT
, PRSR_SDR
|PRSR_HALT
, &stepped
, &prsr
);
1141 if (retval
!= ERROR_OK
|| stepped
)
1144 if (timeval_ms() > then
+ 100) {
1145 LOG_ERROR("timeout waiting for target %s halt after step",
1146 target_name(target
));
1147 retval
= ERROR_TARGET_TIMEOUT
;
1153 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1154 * causes a timeout. The core takes the step but doesn't complete it and so
1155 * debug state is never entered. However, you can manually halt the core
1156 * as an external debug even is also a WFI wakeup event.
1158 if (retval
== ERROR_TARGET_TIMEOUT
)
1159 saved_retval
= aarch64_halt_one(target
, HALT_SYNC
);
1162 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1163 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
1164 if (retval
!= ERROR_OK
)
1167 /* restore interrupts */
1168 if (aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
) {
1169 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0);
1170 if (retval
!= ERROR_OK
)
1174 if (saved_retval
!= ERROR_OK
)
1175 return saved_retval
;
1177 return aarch64_poll(target
);
1180 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
1182 struct armv8_common
*armv8
= target_to_armv8(target
);
1183 struct arm
*arm
= &armv8
->arm
;
1187 LOG_DEBUG("%s", target_name(target
));
1189 if (armv8
->pre_restore_context
)
1190 armv8
->pre_restore_context(target
);
1192 retval
= armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
1193 if (retval
== ERROR_OK
) {
1194 /* registers are now invalid */
1195 register_cache_invalidate(arm
->core_cache
);
1196 register_cache_invalidate(arm
->core_cache
->next
);
1203 * Cortex-A8 Breakpoint and watchpoint functions
1206 /* Setup hardware Breakpoint Register Pair */
1207 static int aarch64_set_breakpoint(struct target
*target
,
1208 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1213 uint8_t byte_addr_select
= 0x0F;
1214 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1215 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1216 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1218 if (breakpoint
->set
) {
1219 LOG_WARNING("breakpoint already set");
1223 if (breakpoint
->type
== BKPT_HARD
) {
1225 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
1227 if (brp_i
>= aarch64
->brp_num
) {
1228 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1229 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1231 breakpoint
->set
= brp_i
+ 1;
1232 if (breakpoint
->length
== 2)
1233 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1234 control
= ((matchmode
& 0x7) << 20)
1236 | (byte_addr_select
<< 5)
1238 brp_list
[brp_i
].used
= 1;
1239 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1240 brp_list
[brp_i
].control
= control
;
1241 bpt_value
= brp_list
[brp_i
].value
;
1243 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1244 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1245 (uint32_t)(bpt_value
& 0xFFFFFFFF));
1246 if (retval
!= ERROR_OK
)
1248 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1249 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1250 (uint32_t)(bpt_value
>> 32));
1251 if (retval
!= ERROR_OK
)
1254 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1255 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1256 brp_list
[brp_i
].control
);
1257 if (retval
!= ERROR_OK
)
1259 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1260 brp_list
[brp_i
].control
,
1261 brp_list
[brp_i
].value
);
1263 } else if (breakpoint
->type
== BKPT_SOFT
) {
1266 buf_set_u32(code
, 0, 32, armv8_opcode(armv8
, ARMV8_OPC_HLT
));
1267 retval
= target_read_memory(target
,
1268 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1269 breakpoint
->length
, 1,
1270 breakpoint
->orig_instr
);
1271 if (retval
!= ERROR_OK
)
1274 armv8_cache_d_inner_flush_virt(armv8
,
1275 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1276 breakpoint
->length
);
1278 retval
= target_write_memory(target
,
1279 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1280 breakpoint
->length
, 1, code
);
1281 if (retval
!= ERROR_OK
)
1284 armv8_cache_d_inner_flush_virt(armv8
,
1285 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1286 breakpoint
->length
);
1288 armv8_cache_i_inner_inval_virt(armv8
,
1289 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1290 breakpoint
->length
);
1292 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1295 /* Ensure that halting debug mode is enable */
1296 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
1297 if (retval
!= ERROR_OK
) {
1298 LOG_DEBUG("Failed to set DSCR.HDE");
1305 static int aarch64_set_context_breakpoint(struct target
*target
,
1306 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1308 int retval
= ERROR_FAIL
;
1311 uint8_t byte_addr_select
= 0x0F;
1312 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1313 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1314 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1316 if (breakpoint
->set
) {
1317 LOG_WARNING("breakpoint already set");
1320 /*check available context BRPs*/
1321 while ((brp_list
[brp_i
].used
||
1322 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
1325 if (brp_i
>= aarch64
->brp_num
) {
1326 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1330 breakpoint
->set
= brp_i
+ 1;
1331 control
= ((matchmode
& 0x7) << 20)
1333 | (byte_addr_select
<< 5)
1335 brp_list
[brp_i
].used
= 1;
1336 brp_list
[brp_i
].value
= (breakpoint
->asid
);
1337 brp_list
[brp_i
].control
= control
;
1338 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1339 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1340 brp_list
[brp_i
].value
);
1341 if (retval
!= ERROR_OK
)
1343 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1344 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1345 brp_list
[brp_i
].control
);
1346 if (retval
!= ERROR_OK
)
1348 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1349 brp_list
[brp_i
].control
,
1350 brp_list
[brp_i
].value
);
1355 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1357 int retval
= ERROR_FAIL
;
1358 int brp_1
= 0; /* holds the contextID pair */
1359 int brp_2
= 0; /* holds the IVA pair */
1360 uint32_t control_CTX
, control_IVA
;
1361 uint8_t CTX_byte_addr_select
= 0x0F;
1362 uint8_t IVA_byte_addr_select
= 0x0F;
1363 uint8_t CTX_machmode
= 0x03;
1364 uint8_t IVA_machmode
= 0x01;
1365 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1366 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1367 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1369 if (breakpoint
->set
) {
1370 LOG_WARNING("breakpoint already set");
1373 /*check available context BRPs*/
1374 while ((brp_list
[brp_1
].used
||
1375 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
1378 printf("brp(CTX) found num: %d\n", brp_1
);
1379 if (brp_1
>= aarch64
->brp_num
) {
1380 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1384 while ((brp_list
[brp_2
].used
||
1385 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
1388 printf("brp(IVA) found num: %d\n", brp_2
);
1389 if (brp_2
>= aarch64
->brp_num
) {
1390 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1394 breakpoint
->set
= brp_1
+ 1;
1395 breakpoint
->linked_BRP
= brp_2
;
1396 control_CTX
= ((CTX_machmode
& 0x7) << 20)
1399 | (CTX_byte_addr_select
<< 5)
1401 brp_list
[brp_1
].used
= 1;
1402 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1403 brp_list
[brp_1
].control
= control_CTX
;
1404 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1405 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1406 brp_list
[brp_1
].value
);
1407 if (retval
!= ERROR_OK
)
1409 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1410 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1411 brp_list
[brp_1
].control
);
1412 if (retval
!= ERROR_OK
)
1415 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1418 | (IVA_byte_addr_select
<< 5)
1420 brp_list
[brp_2
].used
= 1;
1421 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1422 brp_list
[brp_2
].control
= control_IVA
;
1423 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1424 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1425 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1426 if (retval
!= ERROR_OK
)
1428 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1429 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].BRPn
,
1430 brp_list
[brp_2
].value
>> 32);
1431 if (retval
!= ERROR_OK
)
1433 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1434 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1435 brp_list
[brp_2
].control
);
1436 if (retval
!= ERROR_OK
)
1442 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1445 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1446 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1447 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1449 if (!breakpoint
->set
) {
1450 LOG_WARNING("breakpoint not set");
1454 if (breakpoint
->type
== BKPT_HARD
) {
1455 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1456 int brp_i
= breakpoint
->set
- 1;
1457 int brp_j
= breakpoint
->linked_BRP
;
1458 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1459 LOG_DEBUG("Invalid BRP number in breakpoint");
1462 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1463 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1464 brp_list
[brp_i
].used
= 0;
1465 brp_list
[brp_i
].value
= 0;
1466 brp_list
[brp_i
].control
= 0;
1467 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1468 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1469 brp_list
[brp_i
].control
);
1470 if (retval
!= ERROR_OK
)
1472 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1473 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1474 (uint32_t)brp_list
[brp_i
].value
);
1475 if (retval
!= ERROR_OK
)
1477 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1478 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1479 (uint32_t)brp_list
[brp_i
].value
);
1480 if (retval
!= ERROR_OK
)
1482 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1483 LOG_DEBUG("Invalid BRP number in breakpoint");
1486 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1487 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1488 brp_list
[brp_j
].used
= 0;
1489 brp_list
[brp_j
].value
= 0;
1490 brp_list
[brp_j
].control
= 0;
1491 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1492 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1493 brp_list
[brp_j
].control
);
1494 if (retval
!= ERROR_OK
)
1496 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1497 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1498 (uint32_t)brp_list
[brp_j
].value
);
1499 if (retval
!= ERROR_OK
)
1501 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1502 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_j
].BRPn
,
1503 (uint32_t)brp_list
[brp_j
].value
);
1504 if (retval
!= ERROR_OK
)
1507 breakpoint
->linked_BRP
= 0;
1508 breakpoint
->set
= 0;
1512 int brp_i
= breakpoint
->set
- 1;
1513 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1514 LOG_DEBUG("Invalid BRP number in breakpoint");
1517 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1518 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1519 brp_list
[brp_i
].used
= 0;
1520 brp_list
[brp_i
].value
= 0;
1521 brp_list
[brp_i
].control
= 0;
1522 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1523 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1524 brp_list
[brp_i
].control
);
1525 if (retval
!= ERROR_OK
)
1527 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1528 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1529 brp_list
[brp_i
].value
);
1530 if (retval
!= ERROR_OK
)
1533 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1534 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1535 (uint32_t)brp_list
[brp_i
].value
);
1536 if (retval
!= ERROR_OK
)
1538 breakpoint
->set
= 0;
1542 /* restore original instruction (kept in target endianness) */
1544 armv8_cache_d_inner_flush_virt(armv8
,
1545 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1546 breakpoint
->length
);
1548 if (breakpoint
->length
== 4) {
1549 retval
= target_write_memory(target
,
1550 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1551 4, 1, breakpoint
->orig_instr
);
1552 if (retval
!= ERROR_OK
)
1555 retval
= target_write_memory(target
,
1556 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1557 2, 1, breakpoint
->orig_instr
);
1558 if (retval
!= ERROR_OK
)
1562 armv8_cache_d_inner_flush_virt(armv8
,
1563 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1564 breakpoint
->length
);
1566 armv8_cache_i_inner_inval_virt(armv8
,
1567 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1568 breakpoint
->length
);
1570 breakpoint
->set
= 0;
1575 static int aarch64_add_breakpoint(struct target
*target
,
1576 struct breakpoint
*breakpoint
)
1578 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1580 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1581 LOG_INFO("no hardware breakpoint available");
1582 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1585 if (breakpoint
->type
== BKPT_HARD
)
1586 aarch64
->brp_num_available
--;
1588 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1591 static int aarch64_add_context_breakpoint(struct target
*target
,
1592 struct breakpoint
*breakpoint
)
1594 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1596 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1597 LOG_INFO("no hardware breakpoint available");
1598 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1601 if (breakpoint
->type
== BKPT_HARD
)
1602 aarch64
->brp_num_available
--;
1604 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1607 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1608 struct breakpoint
*breakpoint
)
1610 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1612 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1613 LOG_INFO("no hardware breakpoint available");
1614 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1617 if (breakpoint
->type
== BKPT_HARD
)
1618 aarch64
->brp_num_available
--;
1620 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1624 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1626 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1629 /* It is perfectly possible to remove breakpoints while the target is running */
1630 if (target
->state
!= TARGET_HALTED
) {
1631 LOG_WARNING("target not halted");
1632 return ERROR_TARGET_NOT_HALTED
;
1636 if (breakpoint
->set
) {
1637 aarch64_unset_breakpoint(target
, breakpoint
);
1638 if (breakpoint
->type
== BKPT_HARD
)
1639 aarch64
->brp_num_available
++;
1646 * Cortex-A8 Reset functions
1649 static int aarch64_assert_reset(struct target
*target
)
1651 struct armv8_common
*armv8
= target_to_armv8(target
);
1655 /* FIXME when halt is requested, make it work somehow... */
1657 /* Issue some kind of warm reset. */
1658 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1659 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1660 else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1661 /* REVISIT handle "pulls" cases, if there's
1662 * hardware that needs them to work.
1664 jtag_add_reset(0, 1);
1666 LOG_ERROR("%s: how to reset?", target_name(target
));
1670 /* registers are now invalid */
1671 if (target_was_examined(target
)) {
1672 register_cache_invalidate(armv8
->arm
.core_cache
);
1673 register_cache_invalidate(armv8
->arm
.core_cache
->next
);
1676 target
->state
= TARGET_RESET
;
1681 static int aarch64_deassert_reset(struct target
*target
)
1687 /* be certain SRST is off */
1688 jtag_add_reset(0, 0);
1690 if (!target_was_examined(target
))
1693 retval
= aarch64_poll(target
);
1694 if (retval
!= ERROR_OK
)
1697 if (target
->reset_halt
) {
1698 if (target
->state
!= TARGET_HALTED
) {
1699 LOG_WARNING("%s: ran after reset and before halt ...",
1700 target_name(target
));
1701 retval
= target_halt(target
);
1702 if (retval
!= ERROR_OK
)
1707 return aarch64_init_debug_access(target
);
1710 static int aarch64_write_cpu_memory_slow(struct target
*target
,
1711 uint32_t size
, uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
1713 struct armv8_common
*armv8
= target_to_armv8(target
);
1714 struct arm_dpm
*dpm
= &armv8
->dpm
;
1715 struct arm
*arm
= &armv8
->arm
;
1718 armv8_reg_current(arm
, 1)->dirty
= true;
1720 /* change DCC to normal mode if necessary */
1721 if (*dscr
& DSCR_MA
) {
1723 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1724 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1725 if (retval
!= ERROR_OK
)
1730 uint32_t data
, opcode
;
1732 /* write the data to store into DTRRX */
1736 data
= target_buffer_get_u16(target
, buffer
);
1738 data
= target_buffer_get_u32(target
, buffer
);
1739 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1740 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
1741 if (retval
!= ERROR_OK
)
1744 if (arm
->core_state
== ARM_STATE_AARCH64
)
1745 retval
= dpm
->instr_execute(dpm
, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0
, 1));
1747 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
1748 if (retval
!= ERROR_OK
)
1752 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRB_IP
);
1754 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRH_IP
);
1756 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRW_IP
);
1757 retval
= dpm
->instr_execute(dpm
, opcode
);
1758 if (retval
!= ERROR_OK
)
1769 static int aarch64_write_cpu_memory_fast(struct target
*target
,
1770 uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
1772 struct armv8_common
*armv8
= target_to_armv8(target
);
1773 struct arm
*arm
= &armv8
->arm
;
1776 armv8_reg_current(arm
, 1)->dirty
= true;
1778 /* Step 1.d - Change DCC to memory mode */
1780 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1781 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1782 if (retval
!= ERROR_OK
)
1786 /* Step 2.a - Do the write */
1787 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
1788 buffer
, 4, count
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
1789 if (retval
!= ERROR_OK
)
1792 /* Step 3.a - Switch DTR mode back to Normal mode */
1794 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1795 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1796 if (retval
!= ERROR_OK
)
1802 static int aarch64_write_cpu_memory(struct target
*target
,
1803 uint64_t address
, uint32_t size
,
1804 uint32_t count
, const uint8_t *buffer
)
1806 /* write memory through APB-AP */
1807 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1808 struct armv8_common
*armv8
= target_to_armv8(target
);
1809 struct arm_dpm
*dpm
= &armv8
->dpm
;
1810 struct arm
*arm
= &armv8
->arm
;
1813 if (target
->state
!= TARGET_HALTED
) {
1814 LOG_WARNING("target not halted");
1815 return ERROR_TARGET_NOT_HALTED
;
1818 /* Mark register X0 as dirty, as it will be used
1819 * for transferring the data.
1820 * It will be restored automatically when exiting
1823 armv8_reg_current(arm
, 0)->dirty
= true;
1825 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1828 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1829 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1830 if (retval
!= ERROR_OK
)
1833 /* Set Normal access mode */
1834 dscr
= (dscr
& ~DSCR_MA
);
1835 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1836 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1837 if (retval
!= ERROR_OK
)
1840 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1841 /* Write X0 with value 'address' using write procedure */
1842 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1843 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1844 retval
= dpm
->instr_write_data_dcc_64(dpm
,
1845 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
1847 /* Write R0 with value 'address' using write procedure */
1848 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1849 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1850 retval
= dpm
->instr_write_data_dcc(dpm
,
1851 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
1854 if (retval
!= ERROR_OK
)
1857 if (size
== 4 && (address
% 4) == 0)
1858 retval
= aarch64_write_cpu_memory_fast(target
, count
, buffer
, &dscr
);
1860 retval
= aarch64_write_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
1862 if (retval
!= ERROR_OK
) {
1863 /* Unset DTR mode */
1864 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1865 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1867 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1868 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1871 /* Check for sticky abort flags in the DSCR */
1872 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1873 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1874 if (retval
!= ERROR_OK
)
1878 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1879 /* Abort occurred - clear it and exit */
1880 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1881 armv8_dpm_handle_exception(dpm
, true);
1889 static int aarch64_read_cpu_memory_slow(struct target
*target
,
1890 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
1892 struct armv8_common
*armv8
= target_to_armv8(target
);
1893 struct arm_dpm
*dpm
= &armv8
->dpm
;
1894 struct arm
*arm
= &armv8
->arm
;
1897 armv8_reg_current(arm
, 1)->dirty
= true;
1899 /* change DCC to normal mode (if necessary) */
1900 if (*dscr
& DSCR_MA
) {
1902 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1903 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1904 if (retval
!= ERROR_OK
)
1909 uint32_t opcode
, data
;
1912 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRB_IP
);
1914 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRH_IP
);
1916 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRW_IP
);
1917 retval
= dpm
->instr_execute(dpm
, opcode
);
1918 if (retval
!= ERROR_OK
)
1921 if (arm
->core_state
== ARM_STATE_AARCH64
)
1922 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0
, 1));
1924 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1925 if (retval
!= ERROR_OK
)
1928 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1929 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &data
);
1930 if (retval
!= ERROR_OK
)
1934 *buffer
= (uint8_t)data
;
1936 target_buffer_set_u16(target
, buffer
, (uint16_t)data
);
1938 target_buffer_set_u32(target
, buffer
, data
);
1948 static int aarch64_read_cpu_memory_fast(struct target
*target
,
1949 uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
1951 struct armv8_common
*armv8
= target_to_armv8(target
);
1952 struct arm_dpm
*dpm
= &armv8
->dpm
;
1953 struct arm
*arm
= &armv8
->arm
;
1957 /* Mark X1 as dirty */
1958 armv8_reg_current(arm
, 1)->dirty
= true;
1960 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1961 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1962 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0));
1964 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1965 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1968 if (retval
!= ERROR_OK
)
1971 /* Step 1.e - Change DCC to memory mode */
1973 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1974 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1975 if (retval
!= ERROR_OK
)
1978 /* Step 1.f - read DBGDTRTX and discard the value */
1979 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1980 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1981 if (retval
!= ERROR_OK
)
1985 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1986 * Abort flags are sticky, so can be read at end of transactions
1988 * This data is read in aligned to 32 bit boundary.
1992 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1993 * increments X0 by 4. */
1994 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, buffer
, 4, count
,
1995 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
1996 if (retval
!= ERROR_OK
)
2000 /* Step 3.a - set DTR access mode back to Normal mode */
2002 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2003 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2004 if (retval
!= ERROR_OK
)
2007 /* Step 3.b - read DBGDTRTX for the final value */
2008 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2009 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2010 if (retval
!= ERROR_OK
)
2013 target_buffer_set_u32(target
, buffer
+ count
* 4, value
);
2017 static int aarch64_read_cpu_memory(struct target
*target
,
2018 target_addr_t address
, uint32_t size
,
2019 uint32_t count
, uint8_t *buffer
)
2021 /* read memory through APB-AP */
2022 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2023 struct armv8_common
*armv8
= target_to_armv8(target
);
2024 struct arm_dpm
*dpm
= &armv8
->dpm
;
2025 struct arm
*arm
= &armv8
->arm
;
2028 LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64
" size %" PRIu32
" count %" PRIu32
,
2029 address
, size
, count
);
2031 if (target
->state
!= TARGET_HALTED
) {
2032 LOG_WARNING("target not halted");
2033 return ERROR_TARGET_NOT_HALTED
;
2036 /* Mark register X0 as dirty, as it will be used
2037 * for transferring the data.
2038 * It will be restored automatically when exiting
2041 armv8_reg_current(arm
, 0)->dirty
= true;
2044 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2045 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2046 if (retval
!= ERROR_OK
)
2049 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2051 /* Set Normal access mode */
2053 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2054 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2055 if (retval
!= ERROR_OK
)
2058 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2059 /* Write X0 with value 'address' using write procedure */
2060 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2061 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2062 retval
= dpm
->instr_write_data_dcc_64(dpm
,
2063 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
2065 /* Write R0 with value 'address' using write procedure */
2066 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2067 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2068 retval
= dpm
->instr_write_data_dcc(dpm
,
2069 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
2072 if (retval
!= ERROR_OK
)
2075 if (size
== 4 && (address
% 4) == 0)
2076 retval
= aarch64_read_cpu_memory_fast(target
, count
, buffer
, &dscr
);
2078 retval
= aarch64_read_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
2080 if (dscr
& DSCR_MA
) {
2082 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2083 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2086 if (retval
!= ERROR_OK
)
2089 /* Check for sticky abort flags in the DSCR */
2090 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2091 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2092 if (retval
!= ERROR_OK
)
2097 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
2098 /* Abort occurred - clear it and exit */
2099 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
2100 armv8_dpm_handle_exception(dpm
, true);
2108 static int aarch64_read_phys_memory(struct target
*target
,
2109 target_addr_t address
, uint32_t size
,
2110 uint32_t count
, uint8_t *buffer
)
2112 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2114 if (count
&& buffer
) {
2115 /* read memory through APB-AP */
2116 retval
= aarch64_mmu_modify(target
, 0);
2117 if (retval
!= ERROR_OK
)
2119 retval
= aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2124 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
2125 uint32_t size
, uint32_t count
, uint8_t *buffer
)
2127 int mmu_enabled
= 0;
2130 /* determine if MMU was enabled on target stop */
2131 retval
= aarch64_mmu(target
, &mmu_enabled
);
2132 if (retval
!= ERROR_OK
)
2136 /* enable MMU as we could have disabled it for phys access */
2137 retval
= aarch64_mmu_modify(target
, 1);
2138 if (retval
!= ERROR_OK
)
2141 return aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2144 static int aarch64_write_phys_memory(struct target
*target
,
2145 target_addr_t address
, uint32_t size
,
2146 uint32_t count
, const uint8_t *buffer
)
2148 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2150 if (count
&& buffer
) {
2151 /* write memory through APB-AP */
2152 retval
= aarch64_mmu_modify(target
, 0);
2153 if (retval
!= ERROR_OK
)
2155 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2161 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
2162 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
2164 int mmu_enabled
= 0;
2167 /* determine if MMU was enabled on target stop */
2168 retval
= aarch64_mmu(target
, &mmu_enabled
);
2169 if (retval
!= ERROR_OK
)
2173 /* enable MMU as we could have disabled it for phys access */
2174 retval
= aarch64_mmu_modify(target
, 1);
2175 if (retval
!= ERROR_OK
)
2178 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2181 static int aarch64_handle_target_request(void *priv
)
2183 struct target
*target
= priv
;
2184 struct armv8_common
*armv8
= target_to_armv8(target
);
2187 if (!target_was_examined(target
))
2189 if (!target
->dbg_msg_enabled
)
2192 if (target
->state
== TARGET_RUNNING
) {
2195 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2196 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2198 /* check if we have data */
2199 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
2200 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2201 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
2202 if (retval
== ERROR_OK
) {
2203 target_request(target
, request
);
2204 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2205 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2213 static int aarch64_examine_first(struct target
*target
)
2215 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2216 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2217 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2218 struct aarch64_private_config
*pc
;
2220 int retval
= ERROR_OK
;
2221 uint64_t debug
, ttypr
;
2223 uint32_t tmp0
, tmp1
, tmp2
, tmp3
;
2224 debug
= ttypr
= cpuid
= 0;
2226 /* Search for the APB-AB - it is needed for access to debug registers */
2227 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
2228 if (retval
!= ERROR_OK
) {
2229 LOG_ERROR("Could not find APB-AP for debug access");
2233 retval
= mem_ap_init(armv8
->debug_ap
);
2234 if (retval
!= ERROR_OK
) {
2235 LOG_ERROR("Could not initialize the APB-AP");
2239 armv8
->debug_ap
->memaccess_tck
= 10;
2241 if (!target
->dbgbase_set
) {
2243 /* Get ROM Table base */
2245 int32_t coreidx
= target
->coreid
;
2246 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
2247 if (retval
!= ERROR_OK
)
2249 /* Lookup 0x15 -- Processor DAP */
2250 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
2251 &armv8
->debug_base
, &coreidx
);
2252 if (retval
!= ERROR_OK
)
2254 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
2255 " apid: %08" PRIx32
, coreidx
, armv8
->debug_base
, apid
);
2257 armv8
->debug_base
= target
->dbgbase
;
2259 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2260 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
2261 if (retval
!= ERROR_OK
) {
2262 LOG_DEBUG("Examine %s failed", "oslock");
2266 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2267 armv8
->debug_base
+ CPUV8_DBG_MAINID0
, &cpuid
);
2268 if (retval
!= ERROR_OK
) {
2269 LOG_DEBUG("Examine %s failed", "CPUID");
2273 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2274 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
, &tmp0
);
2275 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2276 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
+ 4, &tmp1
);
2277 if (retval
!= ERROR_OK
) {
2278 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2281 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2282 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
, &tmp2
);
2283 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2284 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
+ 4, &tmp3
);
2285 if (retval
!= ERROR_OK
) {
2286 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2290 retval
= dap_run(armv8
->debug_ap
->dap
);
2291 if (retval
!= ERROR_OK
) {
2292 LOG_ERROR("%s: examination failed\n", target_name(target
));
2297 ttypr
= (ttypr
<< 32) | tmp0
;
2299 debug
= (debug
<< 32) | tmp2
;
2301 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
2302 LOG_DEBUG("ttypr = 0x%08" PRIx64
, ttypr
);
2303 LOG_DEBUG("debug = 0x%08" PRIx64
, debug
);
2305 if (target
->private_config
== NULL
)
2308 pc
= (struct aarch64_private_config
*)target
->private_config
;
2309 if (pc
->cti
== NULL
)
2312 armv8
->cti
= pc
->cti
;
2314 retval
= aarch64_dpm_setup(aarch64
, debug
);
2315 if (retval
!= ERROR_OK
)
2318 /* Setup Breakpoint Register Pairs */
2319 aarch64
->brp_num
= (uint32_t)((debug
>> 12) & 0x0F) + 1;
2320 aarch64
->brp_num_context
= (uint32_t)((debug
>> 28) & 0x0F) + 1;
2321 aarch64
->brp_num_available
= aarch64
->brp_num
;
2322 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
2323 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
2324 aarch64
->brp_list
[i
].used
= 0;
2325 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
2326 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
2328 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
2329 aarch64
->brp_list
[i
].value
= 0;
2330 aarch64
->brp_list
[i
].control
= 0;
2331 aarch64
->brp_list
[i
].BRPn
= i
;
2334 LOG_DEBUG("Configured %i hw breakpoints", aarch64
->brp_num
);
2336 target
->state
= TARGET_UNKNOWN
;
2337 target
->debug_reason
= DBG_REASON_NOTHALTED
;
2338 aarch64
->isrmasking_mode
= AARCH64_ISRMASK_ON
;
2339 target_set_examined(target
);
2343 static int aarch64_examine(struct target
*target
)
2345 int retval
= ERROR_OK
;
2347 /* don't re-probe hardware after each reset */
2348 if (!target_was_examined(target
))
2349 retval
= aarch64_examine_first(target
);
2351 /* Configure core debug access */
2352 if (retval
== ERROR_OK
)
2353 retval
= aarch64_init_debug_access(target
);
2359 * Cortex-A8 target creation and initialization
2362 static int aarch64_init_target(struct command_context
*cmd_ctx
,
2363 struct target
*target
)
2365 /* examine_first() does a bunch of this */
2366 arm_semihosting_init(target
);
2370 static int aarch64_init_arch_info(struct target
*target
,
2371 struct aarch64_common
*aarch64
, struct adiv5_dap
*dap
)
2373 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2375 /* Setup struct aarch64_common */
2376 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
2377 armv8
->arm
.dap
= dap
;
2379 /* register arch-specific functions */
2380 armv8
->examine_debug_reason
= NULL
;
2381 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
2382 armv8
->pre_restore_context
= NULL
;
2383 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
2385 armv8_init_arch_info(target
, armv8
);
2386 target_register_timer_callback(aarch64_handle_target_request
, 1, 1, target
);
2391 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
2393 struct aarch64_private_config
*pc
= target
->private_config
;
2394 struct aarch64_common
*aarch64
= calloc(1, sizeof(struct aarch64_common
));
2396 if (adiv5_verify_config(&pc
->adiv5_config
) != ERROR_OK
)
2399 return aarch64_init_arch_info(target
, aarch64
, pc
->adiv5_config
.dap
);
2402 static void aarch64_deinit_target(struct target
*target
)
2404 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2405 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2406 struct arm_dpm
*dpm
= &armv8
->dpm
;
2408 armv8_free_reg_cache(target
);
2409 free(aarch64
->brp_list
);
2412 free(target
->private_config
);
2416 static int aarch64_mmu(struct target
*target
, int *enabled
)
2418 if (target
->state
!= TARGET_HALTED
) {
2419 LOG_ERROR("%s: target %s not halted", __func__
, target_name(target
));
2420 return ERROR_TARGET_INVALID
;
2423 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
/* target_type.virt2phys() hook: translate a virtual address to a
 * physical one via the armv8 MMU walker. The final argument selects a
 * translation mode — presumably "use current EL's translation regime";
 * see armv8_mmu_translate_va_pa() for the exact semantics. */
static int aarch64_virt2phys(struct target *target, target_addr_t virt,
		target_addr_t *phys)
{
	return armv8_mmu_translate_va_pa(target, virt, phys, 1);
}
/*
 * private target configuration items
 */
enum aarch64_cfg_param {
	CFG_CTI,	/* "-cti": cross-trigger interface instance name */
};

/* name/value table consumed by aarch64_jim_configure() */
static const Jim_Nvp nvp_config_opts[] = {
	{ .name = "-cti", .value = CFG_CTI },
	{ .name = NULL, .value = -1 }	/* sentinel */
};
/* target_type.target_jim_configure() hook: parse aarch64-specific
 * "-cti <name>" configure/cget options, after letting the common
 * adiv5 layer consume its own options.
 *
 * Returns JIM_OK when the topmost option was handled, JIM_ERR on a
 * parse error, or JIM_CONTINUE when the option is not ours and the
 * generic layer should keep looking.
 */
static int aarch64_jim_configure(struct target *target, Jim_GetOptInfo *goi)
{
	struct aarch64_private_config *pc;
	Jim_Nvp *n;
	int e;

	/* Allocate the private config lazily on first use */
	pc = (struct aarch64_private_config *)target->private_config;
	if (pc == NULL) {
		pc = calloc(1, sizeof(struct aarch64_private_config));
		target->private_config = pc;
	}

	/*
	 * Call adiv5_jim_configure() to parse the common DAP options
	 * It will return JIM_CONTINUE if it didn't find any known
	 * options, JIM_OK if it correctly parsed the topmost option
	 * and JIM_ERR if an error occurred during parameter evaluation.
	 * For JIM_CONTINUE, we check our own params.
	 */
	e = adiv5_jim_configure(target, goi);
	if (e != JIM_CONTINUE)
		return e;

	/* parse config or cget options ... */
	if (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);

		/* check first if topmost item is for us */
		e = Jim_Nvp_name2value_obj(goi->interp, nvp_config_opts,
				goi->argv[0], &n);
		if (e != JIM_OK)
			return JIM_CONTINUE;

		/* consume the option name itself */
		e = Jim_GetOpt_Obj(goi, NULL);
		if (e != JIM_OK)
			return e;

		switch (n->value) {
		case CFG_CTI: {
			if (goi->isconfigure) {
				/* "-cti <name>": resolve and store the CTI instance */
				Jim_Obj *o_cti;
				struct arm_cti *cti;
				e = Jim_GetOpt_Obj(goi, &o_cti);
				if (e != JIM_OK)
					return e;
				cti = cti_instance_by_jim_obj(goi->interp, o_cti);
				if (cti == NULL) {
					Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
					return JIM_ERR;
				}
				pc->cti = cti;
			} else {
				/* cget: report the configured CTI name; takes no args */
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}

				if (pc == NULL || pc->cti == NULL) {
					Jim_SetResultString(goi->interp, "CTI not configured", -1);
					return JIM_ERR;
				}
				Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
			}
			break;
		}

		default:
			return JIM_CONTINUE;
		}
	}

	return JIM_CONTINUE;
}
/* "aarch64 cache_info": display cache topology of the current target,
 * delegating to the shared armv8 cache-reporting helper. */
COMMAND_HANDLER(aarch64_handle_cache_info_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct armv8_common *armv8 = target_to_armv8(target);

	return armv8_handle_cache_info_command(CMD_CTX,
			&armv8->armv8_mmu.armv8_cache);
}
/* "aarch64 dbginit": (re-)initialize debug access to the current
 * target; refuses to run before the target has been examined. */
COMMAND_HANDLER(aarch64_handle_dbginit_command)
{
	struct target *target = get_current_target(CMD_CTX);
	if (!target_was_examined(target)) {
		LOG_ERROR("target not examined yet");
		return ERROR_FAIL;
	}

	return aarch64_init_debug_access(target);
}
/* "aarch64 smp_off": disable SMP handling by clearing the smp flag on
 * this target and every target on its SMP list, then pin the gdb
 * service back to the current target. */
COMMAND_HANDLER(aarch64_handle_smp_off_command)
{
	struct target *target = get_current_target(CMD_CTX);
	/* check target is an smp target */
	struct target_list *head;
	struct target *curr;
	head = target->head;
	target->smp = 0;
	if (head != (struct target_list *)NULL) {
		while (head != (struct target_list *)NULL) {
			curr = head->target;
			curr->smp = 0;
			head = head->next;
		}
		/* fixes the target display to the debugger */
		target->gdb_service->target = target;
	}
	return ERROR_OK;
}
/* "aarch64 smp_on": re-enable SMP handling by setting the smp flag on
 * this target and every target on its SMP list. Does nothing if the
 * target is not part of an SMP group. */
COMMAND_HANDLER(aarch64_handle_smp_on_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct target_list *head;
	struct target *curr;
	head = target->head;
	if (head != (struct target_list *)NULL) {
		target->smp = 1;
		while (head != (struct target_list *)NULL) {
			curr = head->target;
			curr->smp = 1;
			head = head->next;
		}
	}
	return ERROR_OK;
}
/* "aarch64 maskisr ['on'|'off']": set or query whether interrupts are
 * masked while single-stepping. With no argument, just prints the
 * current mode. */
COMMAND_HANDLER(aarch64_mask_interrupts_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	static const Jim_Nvp nvp_maskisr_modes[] = {
		{ .name = "off", .value = AARCH64_ISRMASK_OFF },
		{ .name = "on", .value = AARCH64_ISRMASK_ON },
		{ .name = NULL, .value = -1 },
	};
	const Jim_Nvp *n;

	if (CMD_ARGC > 0) {
		n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
		if (n->name == NULL) {
			LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}

		aarch64->isrmasking_mode = n->value;
	}

	/* echo the (possibly just changed) mode back to the user */
	n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
	command_print(CMD_CTX, "aarch64 interrupt mask %s", n->name);

	return ERROR_OK;
}
/* Jim handler backing both "aarch64 mcr" and "aarch64 mrc": issue an
 * AArch32 coprocessor write (mcr) or read (mrc) on the current target.
 *
 * Dispatches on argv[0] ("mcr" vs "mrc"); mcr takes one extra argument
 * (the value to write). Only valid when the target is halted and
 * running in 32-bit state.
 */
static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
{
	struct command_context *context;
	struct target *target;
	struct arm *arm;
	int retval;
	bool is_mcr = false;
	int arg_cnt = 0;

	if (Jim_CompareStringImmediate(interp, argv[0], "mcr")) {
		is_mcr = true;
		arg_cnt = 7;	/* cmd cpnum op1 CRn CRm op2 value */
	} else {
		arg_cnt = 6;	/* cmd cpnum op1 CRn CRm op2 */
	}

	context = current_command_context(interp);
	assert(context != NULL);

	target = get_current_target(context);
	if (target == NULL) {
		LOG_ERROR("%s: no current target", __func__);
		return JIM_ERR;
	}
	if (!target_was_examined(target)) {
		LOG_ERROR("%s: not yet examined", target_name(target));
		return JIM_ERR;
	}

	arm = target_to_arm(target);
	if (!is_arm(arm)) {
		LOG_ERROR("%s: not an ARM", target_name(target));
		return JIM_ERR;
	}

	/* NOTE(review): this returns an OpenOCD error code from a Jim
	 * handler, unlike the JIM_ERR used everywhere else here */
	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	/* mcr/mrc are AArch32 instructions; reject 64-bit state */
	if (arm->core_state == ARM_STATE_AARCH64) {
		LOG_ERROR("%s: not 32-bit arm target", target_name(target));
		return JIM_ERR;
	}

	if (argc != arg_cnt) {
		LOG_ERROR("%s: wrong number of arguments", __func__);
		return JIM_ERR;
	}

	int cpnum;
	uint32_t op1;
	uint32_t op2;
	uint32_t CRn;
	uint32_t CRm;
	uint32_t value;
	long l;

	/* NOTE: parameter sequence matches ARM instruction set usage:
	 *	MCR	pNUM, op1, rX, CRn, CRm, op2	; write CP from rX
	 *	MRC	pNUM, op1, rX, CRn, CRm, op2	; read CP into rX
	 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
	 */
	retval = Jim_GetLong(interp, argv[1], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0xf) {		/* coprocessor number is 4 bits */
		LOG_ERROR("%s: %s %d out of range", __func__,
			"coprocessor", (int) l);
		return JIM_ERR;
	}
	cpnum = l;

	retval = Jim_GetLong(interp, argv[2], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0x7) {		/* op1 is 3 bits */
		LOG_ERROR("%s: %s %d out of range", __func__,
			"op1", (int) l);
		return JIM_ERR;
	}
	op1 = l;

	retval = Jim_GetLong(interp, argv[3], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0xf) {		/* CRn is 4 bits */
		LOG_ERROR("%s: %s %d out of range", __func__,
			"CRn", (int) l);
		return JIM_ERR;
	}
	CRn = l;

	retval = Jim_GetLong(interp, argv[4], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0xf) {		/* CRm is 4 bits */
		LOG_ERROR("%s: %s %d out of range", __func__,
			"CRm", (int) l);
		return JIM_ERR;
	}
	CRm = l;

	retval = Jim_GetLong(interp, argv[5], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0x7) {		/* op2 is 3 bits */
		LOG_ERROR("%s: %s %d out of range", __func__,
			"op2", (int) l);
		return JIM_ERR;
	}
	op2 = l;

	value = 0;

	if (is_mcr == true) {
		retval = Jim_GetLong(interp, argv[6], &l);
		if (retval != JIM_OK)
			return retval;
		value = l;

		/* NOTE: parameters reordered! */
		/* ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2) */
		retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
		if (retval != ERROR_OK)
			return JIM_ERR;
	} else {
		/* NOTE: parameters reordered! */
		/* ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2) */
		retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
		if (retval != ERROR_OK)
			return JIM_ERR;

		/* read result goes back to the Tcl caller */
		Jim_SetResult(interp, Jim_NewIntObj(interp, value));
	}

	return JIM_OK;
}
/* Subcommands of the "aarch64" command group, registered below via
 * aarch64_command_handlers. */
static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
	},
	{	.name = "smp_off",
		.handler = aarch64_handle_smp_off_command,
		.mode = COMMAND_EXEC,
		.help = "Stop smp handling",
	},
	{
		.name = "smp_on",
		.handler = aarch64_handle_smp_on_command,
		.mode = COMMAND_EXEC,
		.help = "Restart smp handling",
	},
	{
		.name = "maskisr",
		.handler = aarch64_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask aarch64 interrupts during single-step",
		.usage = "['on'|'off']",
	},
	{
		/* mcr/mrc share one Jim handler that dispatches on argv[0] */
		.name = "mcr",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "write coprocessor register",
		.usage = "cpnum op1 CRn CRm op2 value",
	},
	{
		.name = "mrc",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "read coprocessor register",
		.usage = "cpnum op1 CRn CRm op2",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration: chains in the shared armv8 commands
 * plus the "aarch64" group defined above. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = armv8_command_handlers,
	},
	{
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "Aarch64 command group",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* target_type vtable for ARMv8-A (AArch64) cores; picked up by the
 * target framework via the global target_types table. Watchpoint
 * support is not implemented (add/remove_watchpoint are NULL). */
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.target_jim_configure = aarch64_jim_configure,
	.init_target = aarch64_init_target,
	.deinit_target = aarch64_deinit_target,
	.examine = aarch64_examine,

	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,

	.virt2phys = aarch64_virt2phys,
};
Linking to existing account procedure
If you already have an account and want to add another login method
you
MUST first sign in with your existing account and
then change URL to read
https://review.openocd.org/login/?link
to get to this page again but this time it'll work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)