1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include "arm_semihosting.h"
32 #include <helper/time_support.h>
44 struct aarch64_private_config
{
45 struct adiv5_private_config adiv5_config
;
49 static int aarch64_poll(struct target
*target
);
50 static int aarch64_debug_entry(struct target
*target
);
51 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
52 static int aarch64_set_breakpoint(struct target
*target
,
53 struct breakpoint
*breakpoint
, uint8_t matchmode
);
54 static int aarch64_set_context_breakpoint(struct target
*target
,
55 struct breakpoint
*breakpoint
, uint8_t matchmode
);
56 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
57 struct breakpoint
*breakpoint
);
58 static int aarch64_unset_breakpoint(struct target
*target
,
59 struct breakpoint
*breakpoint
);
60 static int aarch64_mmu(struct target
*target
, int *enabled
);
61 static int aarch64_virt2phys(struct target
*target
,
62 target_addr_t virt
, target_addr_t
*phys
);
63 static int aarch64_read_cpu_memory(struct target
*target
,
64 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
/*
 * Iterate over every entry of an SMP target group list, starting at @head.
 * @pos is the struct target_list * cursor; each node's successor is reached
 * through its ->next pointer.
 *
 * All macro arguments are parenthesized so that expressions such as
 * foreach_smp_target(p, t->head) expand safely regardless of operator
 * precedence at the call site.
 */
#define foreach_smp_target(pos, head) \
	for ((pos) = (head); (pos) != NULL; (pos) = (pos)->next)
/*
 * Write the saved system control register (SCTLR) back to the core when the
 * cached copy differs from the last value written, selecting the MSR/MCR
 * opcode by the core's current exception level / AArch32 mode.
 * NOTE(review): this listing is a fragmented extraction — interior lines
 * (braces, case labels, the `instr` declaration, returns) are missing from
 * view, so the code below is preserved verbatim.
 */
69 static int aarch64_restore_system_control_reg(struct target
*target
)
71 enum arm_mode target_mode
= ARM_MODE_ANY
;
72 int retval
= ERROR_OK
;
75 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
76 struct armv8_common
*armv8
= target_to_armv8(target
);
/* only write back if the value changed since the last sync */
78 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
79 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
80 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
/* pick the SCTLR write opcode for the current EL / AArch32 mode */
82 switch (armv8
->arm
.core_mode
) {
84 target_mode
= ARMV8_64_EL1H
;
88 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
92 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
96 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
/* AArch32 modes use the CP15 coprocessor write instead */
103 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
107 LOG_INFO("cannot read system control register in this mode");
/* switch the DPM to the mode the opcode must execute in, if any */
111 if (target_mode
!= ARM_MODE_ANY
)
112 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
114 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
, aarch64
->system_control_reg
);
115 if (retval
!= ERROR_OK
)
/* restore the original mode before returning */
118 if (target_mode
!= ARM_MODE_ANY
)
119 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
/*
 * Toggle the MMU enable bit (SCTLR bit 0) in the working copy of the system
 * control register and push it to the core; used by virt2phys and by
 * phys/virt memory accesses. Disabling also clears the D-cache enable bit
 * (bit 2) after flushing. NOTE(review): fragmented extraction — interior
 * lines missing; code preserved verbatim.
 */
125 /* modify system_control_reg in order to enable or disable mmu for :
126 * - virt2phys address conversion
127 * - read or write memory in phys or virt address */
128 static int aarch64_mmu_modify(struct target
*target
, int enable
)
130 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
131 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
132 int retval
= ERROR_OK
;
136 /* if mmu enabled at target stop and mmu not enable */
137 if (!(aarch64
->system_control_reg
& 0x1U
)) {
138 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
141 if (!(aarch64
->system_control_reg_curr
& 0x1U
))
142 aarch64
->system_control_reg_curr
|= 0x1U
;
144 if (aarch64
->system_control_reg_curr
& 0x4U
) {
145 /* data cache is active */
146 aarch64
->system_control_reg_curr
&= ~0x4U
;
147 /* flush data cache armv8 function to be called */
148 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
149 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
/* disable path: clear the MMU enable bit in the working copy */
151 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
152 aarch64
->system_control_reg_curr
&= ~0x1U
;
/* select the SCTLR write opcode for the current EL / AArch32 mode */
156 switch (armv8
->arm
.core_mode
) {
160 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
164 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
168 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
175 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
179 LOG_DEBUG("unknown cpu state 0x%" PRIx32
, armv8
->arm
.core_mode
);
/* push the modified SCTLR value to the core via the DPM */
183 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
,
184 aarch64
->system_control_reg_curr
);
/*
 * Low-level debug initialization: unlock the OS lock, clear the sticky
 * power-down bit in PRSR, then configure the CTI statically (channel 0 =
 * halt request, channel 1 = restart request, all channels gated from the
 * CTM). NOTE(review): fragmented extraction — interior lines missing;
 * code preserved verbatim.
 */
189 * Basic debug access, very low level assumes state is saved
191 static int aarch64_init_debug_access(struct target
*target
)
193 struct armv8_common
*armv8
= target_to_armv8(target
);
197 LOG_DEBUG("%s", target_name(target
));
/* writing 0 to OSLAR clears the OS lock, enabling debug access */
199 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
200 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
201 if (retval
!= ERROR_OK
) {
202 LOG_DEBUG("Examine %s failed", "oslock");
206 /* Clear Sticky Power Down status Bit in PRSR to enable access to
207 the registers in the Core Power Domain */
208 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
209 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
210 if (retval
!= ERROR_OK
)
214 * Static CTI configuration:
215 * Channel 0 -> trigger outputs HALT request to PE
216 * Channel 1 -> trigger outputs Resume request to PE
217 * Gate all channel trigger events from entering the CTM
221 retval
= arm_cti_enable(armv8
->cti
, true);
222 /* By default, gate all channel events to and from the CTM */
223 if (retval
== ERROR_OK
)
224 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
225 /* output halt requests to PE on channel 0 event */
226 if (retval
== ERROR_OK
)
227 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN0
, CTI_CHNL(0));
228 /* output restart requests to PE on channel 1 event */
229 if (retval
== ERROR_OK
)
230 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN1
, CTI_CHNL(1));
231 if (retval
!= ERROR_OK
)
234 /* Resync breakpoint registers */
/*
 * Thin wrapper: write a 32-bit value to a memory-mapped debug register
 * through the debug AP, bypassing cache/MMU handling entirely.
 * NOTE(review): the parameter list lines (address, value) are missing
 * from this fragmented extraction; code preserved verbatim.
 */
239 /* Write to memory mapped registers directly with no cache or mmu handling */
240 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
245 struct armv8_common
*armv8
= target_to_armv8(target
);
247 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
/*
 * Wire up the Debug Programmer's Model (DPM) for this core: point it at the
 * ARM core struct, then run the generic armv8 DPM setup followed by its
 * initialization. NOTE(review): fragmented extraction — interior lines
 * (e.g. use of the `debug` argument) are missing; code preserved verbatim.
 */
252 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint64_t debug
)
254 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
257 dpm
->arm
= &a8
->armv8_common
.arm
;
260 retval
= armv8_dpm_setup(dpm
);
261 if (retval
== ERROR_OK
)
262 retval
= armv8_dpm_initialize(dpm
);
/*
 * Read-modify-write helper for DSCR: set the bits selected by bit_mask to
 * the given value, delegating to the generic armv8 debug-register helper.
 */
267 static int aarch64_set_dscr_bits(struct target
*target
, unsigned long bit_mask
, unsigned long value
)
269 struct armv8_common
*armv8
= target_to_armv8(target
);
270 return armv8_set_dbgreg_bits(armv8
, CPUV8_DBG_DSCR
, bit_mask
, value
);
/*
 * Read PRSR once and report, via *p_result, whether the masked status bits
 * match the expected value; optionally hands the raw PRSR back through
 * p_prsr. NOTE(review): fragmented extraction — interior lines (prsr
 * declaration, p_prsr store, return) are missing; code preserved verbatim.
 */
273 static int aarch64_check_state_one(struct target
*target
,
274 uint32_t mask
, uint32_t val
, int *p_result
, uint32_t *p_prsr
)
276 struct armv8_common
*armv8
= target_to_armv8(target
);
280 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
281 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &prsr
);
282 if (retval
!= ERROR_OK
)
/* compare only the bits selected by mask */
289 *p_result
= (prsr
& mask
) == (val
& mask
);
/*
 * Poll PRSR until the core reports halted or a 1-second timeout expires.
 * NOTE(review): fragmented extraction — the polling loop's braces and local
 * declarations (halted, prsr) are missing from view; code preserved verbatim.
 */
294 static int aarch64_wait_halt_one(struct target
*target
)
296 int retval
= ERROR_OK
;
299 int64_t then
= timeval_ms();
303 retval
= aarch64_check_state_one(target
, PRSR_HALT
, PRSR_HALT
, &halted
, &prsr
);
304 if (retval
!= ERROR_OK
|| halted
)
307 if (timeval_ms() > then
+ 1000) {
308 retval
= ERROR_TARGET_TIMEOUT
;
309 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32
, target_name(target
), prsr
);
/*
 * Prepare every running, examined core in the SMP group for halting:
 * mark its debug reason, ungate CTI channel 0 so halt requests reach the
 * CTM, and enable halting debug mode (DSCR.HDE). When exc_target is set
 * the calling target itself is skipped. NOTE(review): fragmented
 * extraction — continue/break/return lines and the p_first store are
 * missing from view; code preserved verbatim.
 */
316 static int aarch64_prepare_halt_smp(struct target
*target
, bool exc_target
, struct target
**p_first
)
318 int retval
= ERROR_OK
;
319 struct target_list
*head
= target
->head
;
320 struct target
*first
= NULL
;
322 LOG_DEBUG("target %s exc %i", target_name(target
), exc_target
);
324 while (head
!= NULL
) {
325 struct target
*curr
= head
->target
;
326 struct armv8_common
*armv8
= target_to_armv8(curr
);
/* optionally skip the calling target itself */
329 if (exc_target
&& curr
== target
)
331 if (!target_was_examined(curr
))
333 if (curr
->state
!= TARGET_RUNNING
)
336 /* HACK: mark this target as prepared for halting */
337 curr
->debug_reason
= DBG_REASON_DBGRQ
;
339 /* open the gate for channel 0 to let HALT requests pass to the CTM */
340 retval
= arm_cti_ungate_channel(armv8
->cti
, 0);
341 if (retval
== ERROR_OK
)
342 retval
= aarch64_set_dscr_bits(curr
, DSCR_HDE
, DSCR_HDE
);
343 if (retval
!= ERROR_OK
)
346 LOG_DEBUG("target %s prepared", target_name(curr
));
353 if (exc_target
&& first
)
/*
 * Halt a single core: enable halting debug mode, pulse CTI channel 0 to
 * issue the halt request, and — in HALT_SYNC mode — wait for the core to
 * actually halt. NOTE(review): fragmented extraction — returns and closing
 * braces are missing from view; code preserved verbatim.
 */
362 static int aarch64_halt_one(struct target
*target
, enum halt_mode mode
)
364 int retval
= ERROR_OK
;
365 struct armv8_common
*armv8
= target_to_armv8(target
);
367 LOG_DEBUG("%s", target_name(target
));
369 /* allow Halting Debug Mode */
370 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
371 if (retval
!= ERROR_OK
)
374 /* trigger an event on channel 0, this outputs a halt request to the PE */
375 retval
= arm_cti_pulse_channel(armv8
->cti
, 0);
376 if (retval
!= ERROR_OK
)
379 if (mode
== HALT_SYNC
) {
380 retval
= aarch64_wait_halt_one(target
);
381 if (retval
!= ERROR_OK
) {
382 if (retval
== ERROR_TARGET_TIMEOUT
)
383 LOG_ERROR("Timeout waiting for target %s halt", target_name(target
));
/*
 * Halt the whole SMP group: prepare all PEs, lazily halt one (the halt
 * propagates through the CTM), then poll all group members until each
 * reports halted or a 1-second timeout hits; cores seen still running get
 * an explicit halt (Hi6220 multi-cluster workaround). NOTE(review):
 * fragmented extraction — retval declaration, loop braces, continue/return
 * lines missing from view; code preserved verbatim.
 */
391 static int aarch64_halt_smp(struct target
*target
, bool exc_target
)
393 struct target
*next
= target
;
396 /* prepare halt on all PEs of the group */
397 retval
= aarch64_prepare_halt_smp(target
, exc_target
, &next
);
399 if (exc_target
&& next
== target
)
402 /* halt the target PE */
403 if (retval
== ERROR_OK
)
404 retval
= aarch64_halt_one(next
, HALT_LAZY
);
406 if (retval
!= ERROR_OK
)
409 /* wait for all PEs to halt */
410 int64_t then
= timeval_ms();
412 bool all_halted
= true;
413 struct target_list
*head
;
416 foreach_smp_target(head
, target
->head
) {
421 if (!target_was_examined(curr
))
424 retval
= aarch64_check_state_one(curr
, PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
425 if (retval
!= ERROR_OK
|| !halted
) {
434 if (timeval_ms() > then
+ 1000) {
435 retval
= ERROR_TARGET_TIMEOUT
;
440 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
441 * and it looks like the CTI's are not connected by a common
442 * trigger matrix. It seems that we need to halt one core in each
443 * cluster explicitly. So if we find that a core has not halted
444 * yet, we trigger an explicit halt for the second cluster.
446 retval
= aarch64_halt_one(curr
, HALT_LAZY
)
/*
 * After one core halts, bring GDB's view of the SMP group up to date: halt
 * the remaining cores if needed, poll every other group member, and poll
 * the target serving GDB last to avoid recursion into aarch64_poll().
 * NOTE(review): fragmented extraction — continue lines, curr declaration
 * and return are missing from view; code preserved verbatim.
 */
454 static int update_halt_gdb(struct target
*target
, enum target_debug_reason debug_reason
)
456 struct target
*gdb_target
= NULL
;
457 struct target_list
*head
;
460 if (debug_reason
== DBG_REASON_NOTHALTED
) {
461 LOG_DEBUG("Halting remaining targets in SMP group");
462 aarch64_halt_smp(target
, true);
465 /* poll all targets in the group, but skip the target that serves GDB */
466 foreach_smp_target(head
, target
->head
) {
468 /* skip calling context */
471 if (!target_was_examined(curr
))
473 /* skip targets that were already halted */
474 if (curr
->state
== TARGET_HALTED
)
476 /* remember the gdb_service->target */
477 if (curr
->gdb_service
!= NULL
)
478 gdb_target
= curr
->gdb_service
->target
;
480 if (curr
== gdb_target
)
483 /* avoid recursion in aarch64_poll() */
489 /* after all targets were updated, poll the gdb serving target */
490 if (gdb_target
!= NULL
&& gdb_target
!= target
)
491 aarch64_poll(gdb_target
);
/*
 * Poll the core's halt status via PRSR and update target state: on a new
 * halt, run debug entry, propagate the halt through the SMP group, handle
 * semihosting, and fire HALTED / DEBUG_HALTED event callbacks depending on
 * the previous state; otherwise mark the target running.
 * NOTE(review): fragmented extraction — halted declaration, case labels,
 * braces and returns are missing; the update_halt_gdb() return value is
 * ignored here — confirm against upstream whether that is intentional.
 */
497 * Aarch64 Run control
500 static int aarch64_poll(struct target
*target
)
502 enum target_state prev_target_state
;
503 int retval
= ERROR_OK
;
506 retval
= aarch64_check_state_one(target
,
507 PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
508 if (retval
!= ERROR_OK
)
512 prev_target_state
= target
->state
;
513 if (prev_target_state
!= TARGET_HALTED
) {
514 enum target_debug_reason debug_reason
= target
->debug_reason
;
516 /* We have a halting debug event */
517 target
->state
= TARGET_HALTED
;
518 LOG_DEBUG("Target %s halted", target_name(target
));
519 retval
= aarch64_debug_entry(target
);
520 if (retval
!= ERROR_OK
)
524 update_halt_gdb(target
, debug_reason
);
/* semihosting may service a request and resume the target itself */
526 if (arm_semihosting(target
, &retval
) != 0)
529 switch (prev_target_state
) {
533 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
535 case TARGET_DEBUG_RUNNING
:
536 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_HALTED
);
543 target
->state
= TARGET_RUNNING
;
/*
 * Public halt entry point: record the run-control operation, then halt the
 * whole SMP group (when the target is in one — the guarding condition is
 * missing from this fragmented view) or just this core synchronously.
 */
548 static int aarch64_halt(struct target
*target
)
550 struct armv8_common
*armv8
= target_to_armv8(target
);
551 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_HALT
;
554 return aarch64_halt_smp(target
, false);
556 return aarch64_halt_one(target
, HALT_SYNC
);
/*
 * Restore one core's context for resumption: pick the resume PC (current PC
 * or caller-supplied address), align it for the core state, write it back to
 * the PC register cache, restore SCTLR and then the full register context.
 * The chosen PC is reported back through *address. NOTE(review): fragmented
 * extraction — resume_pc declaration, case labels, braces, returns missing;
 * code preserved verbatim.
 */
559 static int aarch64_restore_one(struct target
*target
, int current
,
560 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
562 struct armv8_common
*armv8
= target_to_armv8(target
);
563 struct arm
*arm
= &armv8
->arm
;
567 LOG_DEBUG("%s", target_name(target
));
569 if (!debug_execution
)
570 target_free_all_working_areas(target
);
572 /* current = 1: continue on current pc, otherwise continue at <address> */
573 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
575 resume_pc
= *address
;
577 *address
= resume_pc
;
579 /* Make sure that the Armv7 gdb thumb fixups does not
580 * kill the return address
582 switch (arm
->core_state
) {
584 resume_pc
&= 0xFFFFFFFC;
586 case ARM_STATE_AARCH64
:
587 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
589 case ARM_STATE_THUMB
:
590 case ARM_STATE_THUMB_EE
:
591 /* When the return address is loaded into PC
592 * bit 0 must be 1 to stay in Thumb state
596 case ARM_STATE_JAZELLE
:
597 LOG_ERROR("How do I resume into Jazelle state??");
600 LOG_DEBUG("resume pc = 0x%016" PRIx64
, resume_pc
);
601 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
605 /* called it now before restoring context because it uses cpu
606 * register r0 for restoring system control register */
607 retval
= aarch64_restore_system_control_reg(target
);
608 if (retval
== ERROR_OK
)
609 retval
= aarch64_restore_context(target
, handle_breakpoints
);
/*
 * Prepare a single core for restart: sanity-check DSCR (ITE set, ERR
 * clear), acknowledge any pending CTI halt event, open CTI gate for the
 * restart channel while closing the halt channel, ensure DSCR.HDE is set,
 * and clear PRSR sticky bits so PRSR.SDR starts at 0. NOTE(review):
 * fragmented extraction — dscr/tmp declarations, returns and braces are
 * missing from view; code preserved verbatim.
 */
615 * prepare single target for restart
619 static int aarch64_prepare_restart_one(struct target
*target
)
621 struct armv8_common
*armv8
= target_to_armv8(target
);
626 LOG_DEBUG("%s", target_name(target
));
628 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
629 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
630 if (retval
!= ERROR_OK
)
633 if ((dscr
& DSCR_ITE
) == 0)
634 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
635 if ((dscr
& DSCR_ERR
) != 0)
636 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
638 /* acknowledge a pending CTI halt event */
639 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
641 * open the CTI gate for channel 1 so that the restart events
642 * get passed along to all PEs. Also close gate for channel 0
643 * to isolate the PE from halt events.
645 if (retval
== ERROR_OK
)
646 retval
= arm_cti_ungate_channel(armv8
->cti
, 1);
647 if (retval
== ERROR_OK
)
648 retval
= arm_cti_gate_channel(armv8
->cti
, 0);
650 /* make sure that DSCR.HDE is set */
651 if (retval
== ERROR_OK
) {
653 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
654 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
657 if (retval
== ERROR_OK
) {
658 /* clear sticky bits in PRSR, SDR is now 0 */
659 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
660 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &tmp
);
/*
 * Issue the actual restart: pulse CTI channel 1 and, in RESTART_SYNC mode,
 * poll PRSR.SDR until the core is seen to have restarted (or 1 s elapses).
 * On success the target is marked running with no halt reason.
 * NOTE(review): fragmented extraction — retval/resumed declarations,
 * braces and returns are missing from view; code preserved verbatim.
 */
666 static int aarch64_do_restart_one(struct target
*target
, enum restart_mode mode
)
668 struct armv8_common
*armv8
= target_to_armv8(target
);
671 LOG_DEBUG("%s", target_name(target
));
673 /* trigger an event on channel 1, generates a restart request to the PE */
674 retval
= arm_cti_pulse_channel(armv8
->cti
, 1);
675 if (retval
!= ERROR_OK
)
678 if (mode
== RESTART_SYNC
) {
679 int64_t then
= timeval_ms();
683 * if PRSR.SDR is set now, the target did restart, even
684 * if it's now already halted again (e.g. due to breakpoint)
686 retval
= aarch64_check_state_one(target
,
687 PRSR_SDR
, PRSR_SDR
, &resumed
, NULL
);
688 if (retval
!= ERROR_OK
|| resumed
)
691 if (timeval_ms() > then
+ 1000) {
/* NOTE(review): stray PRIx32 token concatenated to the format string
 * with no matching "%" conversion — the macro expansion is emitted
 * literally in the log. The format should simply be
 * "%s: Timeout waiting for resume". */
692 LOG_ERROR("%s: Timeout waiting for resume"PRIx32
, target_name(target
));
693 retval
= ERROR_TARGET_TIMEOUT
;
699 if (retval
!= ERROR_OK
)
702 target
->debug_reason
= DBG_REASON_NOTHALTED
;
703 target
->state
= TARGET_RUNNING
;
/*
 * Restart a single core: prepare it (CTI gates, DSCR checks) and, if that
 * succeeds, trigger the restart in the requested mode.
 */
708 static int aarch64_restart_one(struct target
*target
, enum restart_mode mode
)
712 LOG_DEBUG("%s", target_name(target
));
714 retval
= aarch64_prepare_restart_one(target
);
715 if (retval
== ERROR_OK
)
716 retval
= aarch64_do_restart_one(target
, mode
);
/*
 * Prepare every halted, examined core in the SMP group (except the caller)
 * for restart: restore its context at the current PC and run the per-core
 * restart preparation; remembers the first valid group member via *p_first.
 * NOTE(review): fragmented extraction — address declaration, continue/break
 * lines and the p_first store are missing from view; code preserved
 * verbatim.
 */
722 * prepare all but the current target for restart
724 static int aarch64_prep_restart_smp(struct target
*target
, int handle_breakpoints
, struct target
**p_first
)
726 int retval
= ERROR_OK
;
727 struct target_list
*head
;
728 struct target
*first
= NULL
;
731 foreach_smp_target(head
, target
->head
) {
732 struct target
*curr
= head
->target
;
734 /* skip calling target */
737 if (!target_was_examined(curr
))
739 if (curr
->state
!= TARGET_HALTED
)
742 /* resume at current address, not in step mode */
743 retval
= aarch64_restore_one(curr
, 1, &address
, handle_breakpoints
, 0);
744 if (retval
== ERROR_OK
)
745 retval
= aarch64_prepare_restart_one(curr
);
746 if (retval
!= ERROR_OK
) {
747 LOG_ERROR("failed to restore target %s", target_name(curr
));
750 /* remember the first valid target in the group */
/*
 * While single-stepping one core, resume all the OTHER cores of the SMP
 * group: prepare them, lazily restart the first one (restart propagates via
 * the CTM), then poll each member's PRSR.SDR until everyone is running,
 * explicitly restarting stragglers (Hi6220 multi-cluster workaround) and
 * timing out after 1 s. NOTE(review): fragmented extraction — resumed/prsr
 * declarations, braces, continue/return lines missing; code preserved
 * verbatim.
 */
762 static int aarch64_step_restart_smp(struct target
*target
)
764 int retval
= ERROR_OK
;
765 struct target_list
*head
;
766 struct target
*first
= NULL
;
768 LOG_DEBUG("%s", target_name(target
));
770 retval
= aarch64_prep_restart_smp(target
, 0, &first
);
771 if (retval
!= ERROR_OK
)
775 retval
= aarch64_do_restart_one(first
, RESTART_LAZY
);
776 if (retval
!= ERROR_OK
) {
777 LOG_DEBUG("error restarting target %s", target_name(first
));
781 int64_t then
= timeval_ms();
783 struct target
*curr
= target
;
784 bool all_resumed
= true;
786 foreach_smp_target(head
, target
->head
) {
795 if (!target_was_examined(curr
))
798 retval
= aarch64_check_state_one(curr
,
799 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
800 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
805 if (curr
->state
!= TARGET_RUNNING
) {
806 curr
->state
= TARGET_RUNNING
;
807 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
808 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
815 if (timeval_ms() > then
+ 1000) {
816 LOG_ERROR("%s: timeout waiting for target resume", __func__
);
817 retval
= ERROR_TARGET_TIMEOUT
;
821 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
822 * and it looks like the CTI's are not connected by a common
823 * trigger matrix. It seems that we need to halt one core in each
824 * cluster explicitly. So if we find that a core has not halted
825 * yet, we trigger an explicit resume for the second cluster.
827 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
828 if (retval
!= ERROR_OK
)
/*
 * Public resume entry point. Requires a halted target. Prepares the rest of
 * the SMP group (restoring context and CTI gates), restores and restarts
 * this core synchronously, then waits (1 s timeout) for every group member
 * to report PRSR.SDR, explicitly restarting stragglers (Hi6220 workaround).
 * Finally marks the target RUNNING or DEBUG_RUNNING and fires the matching
 * RESUMED event. NOTE(review): fragmented extraction — retval/resumed/prsr
 * declarations, braces, continue/return lines missing; code preserved
 * verbatim.
 */
835 static int aarch64_resume(struct target
*target
, int current
,
836 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
839 uint64_t addr
= address
;
841 struct armv8_common
*armv8
= target_to_armv8(target
);
842 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_RESUME
;
844 if (target
->state
!= TARGET_HALTED
)
845 return ERROR_TARGET_NOT_HALTED
;
848 * If this target is part of a SMP group, prepare the others
849 * targets for resuming. This involves restoring the complete
850 * target register context and setting up CTI gates to accept
851 * resume events from the trigger matrix.
854 retval
= aarch64_prep_restart_smp(target
, handle_breakpoints
, NULL
);
855 if (retval
!= ERROR_OK
)
859 /* all targets prepared, restore and restart the current target */
860 retval
= aarch64_restore_one(target
, current
, &addr
, handle_breakpoints
,
862 if (retval
== ERROR_OK
)
863 retval
= aarch64_restart_one(target
, RESTART_SYNC
);
864 if (retval
!= ERROR_OK
)
868 int64_t then
= timeval_ms();
870 struct target
*curr
= target
;
871 struct target_list
*head
;
872 bool all_resumed
= true;
874 foreach_smp_target(head
, target
->head
) {
881 if (!target_was_examined(curr
))
884 retval
= aarch64_check_state_one(curr
,
885 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
886 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
891 if (curr
->state
!= TARGET_RUNNING
) {
892 curr
->state
= TARGET_RUNNING
;
893 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
894 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
901 if (timeval_ms() > then
+ 1000) {
902 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__
, target_name(curr
));
903 retval
= ERROR_TARGET_TIMEOUT
;
908 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
909 * and it looks like the CTI's are not connected by a common
910 * trigger matrix. It seems that we need to halt one core in each
911 * cluster explicitly. So if we find that a core has not halted
912 * yet, we trigger an explicit resume for the second cluster.
914 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
915 if (retval
!= ERROR_OK
)
920 if (retval
!= ERROR_OK
)
923 target
->debug_reason
= DBG_REASON_NOTHALTED
;
925 if (!debug_execution
) {
926 target
->state
= TARGET_RUNNING
;
927 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
928 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
930 target
->state
= TARGET_DEBUG_RUNNING
;
931 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
932 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
/*
 * Called when the core enters debug state: clear sticky errors (DRCR.CSE),
 * read DSCR, ack the CTI halt event, select opcode/register access for the
 * detected core state, close all CTI gates, flush async exceptions, decode
 * the debug reason from DSCR, read WFAR on watchpoint hits, read the
 * current register set and run the post-debug-entry hook. NOTE(review):
 * fragmented extraction — dscr/wfar declarations, braces and returns are
 * missing from view; code preserved verbatim.
 */
938 static int aarch64_debug_entry(struct target
*target
)
940 int retval
= ERROR_OK
;
941 struct armv8_common
*armv8
= target_to_armv8(target
);
942 struct arm_dpm
*dpm
= &armv8
->dpm
;
943 enum arm_state core_state
;
946 /* make sure to clear all sticky errors */
947 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
948 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
949 if (retval
== ERROR_OK
)
950 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
951 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
952 if (retval
== ERROR_OK
)
953 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
955 if (retval
!= ERROR_OK
)
958 LOG_DEBUG("%s dscr = 0x%08" PRIx32
, target_name(target
), dscr
);
961 core_state
= armv8_dpm_get_core_state(dpm
);
962 armv8_select_opcodes(armv8
, core_state
== ARM_STATE_AARCH64
);
963 armv8_select_reg_access(armv8
, core_state
== ARM_STATE_AARCH64
);
965 /* close the CTI gate for all events */
966 if (retval
== ERROR_OK
)
967 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
968 /* discard async exceptions */
969 if (retval
== ERROR_OK
)
970 retval
= dpm
->instr_cpsr_sync(dpm
);
971 if (retval
!= ERROR_OK
)
974 /* Examine debug reason */
975 armv8_dpm_report_dscr(dpm
, dscr
);
977 /* save address of instruction that triggered the watchpoint? */
978 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
/* WFAR is 64 bit, read high word first then low word */
982 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
983 armv8
->debug_base
+ CPUV8_DBG_WFAR1
,
985 if (retval
!= ERROR_OK
)
989 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
990 armv8
->debug_base
+ CPUV8_DBG_WFAR0
,
992 if (retval
!= ERROR_OK
)
995 armv8_dpm_report_wfar(&armv8
->dpm
, wfar
);
998 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
1000 if (retval
== ERROR_OK
&& armv8
->post_debug_entry
)
1001 retval
= armv8
->post_debug_entry(target
);
/*
 * Post-debug-entry hook: read SCTLR for the current EL (or CP15 SCTLR in
 * AArch32 modes) into aarch64->system_control_reg, identify caches/MPIDR on
 * first entry, and derive the cached MMU / D-cache / I-cache enable flags
 * from SCTLR bits 0, 2 and 12. NOTE(review): fragmented extraction —
 * instr/retval declarations, case labels, braces and returns are missing
 * from view; code preserved verbatim.
 */
1006 static int aarch64_post_debug_entry(struct target
*target
)
1008 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1009 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1011 enum arm_mode target_mode
= ARM_MODE_ANY
;
1014 switch (armv8
->arm
.core_mode
) {
1016 target_mode
= ARMV8_64_EL1H
;
1020 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL1
, 0);
1024 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL2
, 0);
1028 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL3
, 0);
1035 instr
= ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1039 LOG_INFO("cannot read system control register in this mode");
1043 if (target_mode
!= ARM_MODE_ANY
)
1044 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
1046 retval
= armv8
->dpm
.instr_read_data_r0(&armv8
->dpm
, instr
, &aarch64
->system_control_reg
);
1047 if (retval
!= ERROR_OK
)
1050 if (target_mode
!= ARM_MODE_ANY
)
1051 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
1053 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
1054 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
/* .info == -1 marks the cache layout as not yet identified */
1056 if (armv8
->armv8_mmu
.armv8_cache
.info
== -1) {
1057 armv8_identify_cache(armv8
);
1058 armv8_read_mpidr(armv8
);
/* mirror SCTLR.M / SCTLR.C / SCTLR.I into the cached flags */
1061 armv8
->armv8_mmu
.mmu_enabled
=
1062 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
1063 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
1064 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
1065 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
1066 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
/*
 * Hardware single-step: save EDECR, set EDECR.SS, optionally mask
 * interrupts (DSCR bits 22-23), isolate this core from CTI channel 1 while
 * resuming the rest of the SMP group, restart this core lazily, then poll
 * for PRSR.SDR|HALT with a 100 ms timeout. A timeout is worked around by an
 * explicit halt (WFI stepping quirk on Renesas R8A7795). EDECR and the
 * interrupt mask are restored afterwards and aarch64_poll() finishes the
 * halt processing. NOTE(review): fragmented extraction — edecr/retval/
 * stepped/prsr declarations, braces and returns are missing from view;
 * code preserved verbatim.
 */
1071 * single-step a target
1073 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
1074 int handle_breakpoints
)
1076 struct armv8_common
*armv8
= target_to_armv8(target
);
1077 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1078 int saved_retval
= ERROR_OK
;
1082 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_STEP
;
1084 if (target
->state
!= TARGET_HALTED
) {
1085 LOG_WARNING("target not halted");
1086 return ERROR_TARGET_NOT_HALTED
;
1089 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1090 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
1091 /* make sure EDECR.SS is not set when restoring the register */
1093 if (retval
== ERROR_OK
) {
1095 /* set EDECR.SS to enter hardware step mode */
1096 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1097 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (edecr
|0x4));
1099 /* disable interrupts while stepping */
1100 if (retval
== ERROR_OK
&& aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
)
1101 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0x3 << 22);
1102 /* bail out if stepping setup has failed */
1103 if (retval
!= ERROR_OK
)
1106 if (target
->smp
&& (current
== 1)) {
1108 * isolate current target so that it doesn't get resumed
1109 * together with the others
1111 retval
= arm_cti_gate_channel(armv8
->cti
, 1);
1112 /* resume all other targets in the group */
1113 if (retval
== ERROR_OK
)
1114 retval
= aarch64_step_restart_smp(target
);
1115 if (retval
!= ERROR_OK
) {
1116 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1119 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1122 /* all other targets running, restore and restart the current target */
1123 retval
= aarch64_restore_one(target
, current
, &address
, 0, 0);
1124 if (retval
== ERROR_OK
)
1125 retval
= aarch64_restart_one(target
, RESTART_LAZY
);
1127 if (retval
!= ERROR_OK
)
1130 LOG_DEBUG("target step-resumed at 0x%" PRIx64
, address
);
1131 if (!handle_breakpoints
)
1132 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1134 int64_t then
= timeval_ms();
1139 retval
= aarch64_check_state_one(target
,
1140 PRSR_SDR
|PRSR_HALT
, PRSR_SDR
|PRSR_HALT
, &stepped
, &prsr
);
1141 if (retval
!= ERROR_OK
|| stepped
)
1144 if (timeval_ms() > then
+ 100) {
1145 LOG_ERROR("timeout waiting for target %s halt after step",
1146 target_name(target
));
1147 retval
= ERROR_TARGET_TIMEOUT
;
1153 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1154 * causes a timeout. The core takes the step but doesn't complete it and so
1155 * debug state is never entered. However, you can manually halt the core
1156 * as an external debug even is also a WFI wakeup event.
1158 if (retval
== ERROR_TARGET_TIMEOUT
)
1159 saved_retval
= aarch64_halt_one(target
, HALT_SYNC
);
1162 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1163 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
1164 if (retval
!= ERROR_OK
)
1167 /* restore interrupts */
1168 if (aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
) {
1169 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0);
1170 if (retval
!= ERROR_OK
)
1174 if (saved_retval
!= ERROR_OK
)
1175 return saved_retval
;
1177 return aarch64_poll(target
);
/*
 * Write all dirty registers back to the core via the DPM (optionally
 * updating breakpoints/watchpoints first when bpwp is set) after running
 * the optional pre_restore_context hook; on success both register caches
 * are invalidated since the on-core values are now authoritative.
 */
1180 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
1182 struct armv8_common
*armv8
= target_to_armv8(target
);
1183 struct arm
*arm
= &armv8
->arm
;
1187 LOG_DEBUG("%s", target_name(target
));
1189 if (armv8
->pre_restore_context
)
1190 armv8
->pre_restore_context(target
);
1192 retval
= armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
1193 if (retval
== ERROR_OK
) {
1194 /* registers are now invalid */
1195 register_cache_invalidate(arm
->core_cache
);
1196 register_cache_invalidate(arm
->core_cache
->next
);
1203 * Cortex-A8 Breakpoint and watchpoint functions
/*
 * Install a breakpoint. BKPT_HARD: allocate a free breakpoint register
 * pair, program BVR/BCR with the aligned address and a control word built
 * from matchmode and the byte-address-select mask. BKPT_SOFT: save the
 * original instruction and patch in an HLT opcode, maintaining D/I cache
 * coherency around the write. Finally ensure DSCR.HDE is set.
 * NOTE(review): fragmented extraction — local declarations (retval, brp_i,
 * control, bpt_value, code[]), braces and returns are missing from view;
 * code preserved verbatim.
 */
1206 /* Setup hardware Breakpoint Register Pair */
1207 static int aarch64_set_breakpoint(struct target
*target
,
1208 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1213 uint8_t byte_addr_select
= 0x0F;
1214 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1215 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1216 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1218 if (breakpoint
->set
) {
1219 LOG_WARNING("breakpoint already set");
1223 if (breakpoint
->type
== BKPT_HARD
) {
/* NOTE(review): brp_list[brp_i] is read BEFORE brp_i is checked against
 * aarch64->brp_num — when every pair is in use this reads one element
 * past the end of the array. The operands should be swapped:
 * (brp_i < aarch64->brp_num) && brp_list[brp_i].used */
1225 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
1227 if (brp_i
>= aarch64
->brp_num
) {
1228 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1229 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
/* ->set stores index+1 so that 0 still means "not set" */
1231 breakpoint
->set
= brp_i
+ 1;
1232 if (breakpoint
->length
== 2)
1233 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1234 control
= ((matchmode
& 0x7) << 20)
1236 | (byte_addr_select
<< 5)
1238 brp_list
[brp_i
].used
= 1;
1239 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1240 brp_list
[brp_i
].control
= control
;
1241 bpt_value
= brp_list
[brp_i
].value
;
/* BVR is 64 bit: write low word then high word */
1243 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1244 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1245 (uint32_t)(bpt_value
& 0xFFFFFFFF));
1246 if (retval
!= ERROR_OK
)
1248 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1249 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1250 (uint32_t)(bpt_value
>> 32));
1251 if (retval
!= ERROR_OK
)
1254 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1255 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1256 brp_list
[brp_i
].control
);
1257 if (retval
!= ERROR_OK
)
1259 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1260 brp_list
[brp_i
].control
,
1261 brp_list
[brp_i
].value
);
1263 } else if (breakpoint
->type
== BKPT_SOFT
) {
/* software breakpoint: replace the instruction with HLT */
1266 buf_set_u32(code
, 0, 32, armv8_opcode(armv8
, ARMV8_OPC_HLT
));
1267 retval
= target_read_memory(target
,
1268 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1269 breakpoint
->length
, 1,
1270 breakpoint
->orig_instr
);
1271 if (retval
!= ERROR_OK
)
/* keep D-cache coherent before and after patching the opcode */
1274 armv8_cache_d_inner_flush_virt(armv8
,
1275 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1276 breakpoint
->length
);
1278 retval
= target_write_memory(target
,
1279 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1280 breakpoint
->length
, 1, code
);
1281 if (retval
!= ERROR_OK
)
1284 armv8_cache_d_inner_flush_virt(armv8
,
1285 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1286 breakpoint
->length
);
1288 armv8_cache_i_inner_inval_virt(armv8
,
1289 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1290 breakpoint
->length
);
1292 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1295 /* Ensure that halting debug mode is enable */
1296 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
1297 if (retval
!= ERROR_OK
) {
1298 LOG_DEBUG("Failed to set DSCR.HDE");
/*
 * Install a context-ID (ASID-matching) hardware breakpoint: find a free
 * BRP of type BRP_CONTEXT, program its BVR with the ASID and its BCR with
 * a control word built from matchmode and the byte-address-select mask.
 * NOTE(review): fragmented extraction — brp_i/control declarations, braces
 * and returns are missing from view; code preserved verbatim.
 */
1305 static int aarch64_set_context_breakpoint(struct target
*target
,
1306 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1308 int retval
= ERROR_FAIL
;
1311 uint8_t byte_addr_select
= 0x0F;
1312 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1313 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1314 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1316 if (breakpoint
->set
) {
1317 LOG_WARNING("breakpoint already set");
1320 /*check available context BRPs*/
/* NOTE(review): brp_list[brp_i] is read BEFORE brp_i is checked against
 * aarch64->brp_num — same out-of-bounds hazard as in
 * aarch64_set_breakpoint(); the bounds test should come first. */
1321 while ((brp_list
[brp_i
].used
||
1322 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
1325 if (brp_i
>= aarch64
->brp_num
) {
1326 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
/* ->set stores index+1 so that 0 still means "not set" */
1330 breakpoint
->set
= brp_i
+ 1;
1331 control
= ((matchmode
& 0x7) << 20)
1333 | (byte_addr_select
<< 5)
1335 brp_list
[brp_i
].used
= 1;
1336 brp_list
[brp_i
].value
= (breakpoint
->asid
);
1337 brp_list
[brp_i
].control
= control
;
1338 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1339 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1340 brp_list
[brp_i
].value
);
1341 if (retval
!= ERROR_OK
)
1343 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1344 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1345 brp_list
[brp_i
].control
);
1346 if (retval
!= ERROR_OK
)
1348 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1349 brp_list
[brp_i
].control
,
1350 brp_list
[brp_i
].value
);
1355 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1357 int retval
= ERROR_FAIL
;
1358 int brp_1
= 0; /* holds the contextID pair */
1359 int brp_2
= 0; /* holds the IVA pair */
1360 uint32_t control_CTX
, control_IVA
;
1361 uint8_t CTX_byte_addr_select
= 0x0F;
1362 uint8_t IVA_byte_addr_select
= 0x0F;
1363 uint8_t CTX_machmode
= 0x03;
1364 uint8_t IVA_machmode
= 0x01;
1365 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1366 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1367 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1369 if (breakpoint
->set
) {
1370 LOG_WARNING("breakpoint already set");
1373 /*check available context BRPs*/
1374 while ((brp_list
[brp_1
].used
||
1375 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
1378 printf("brp(CTX) found num: %d\n", brp_1
);
1379 if (brp_1
>= aarch64
->brp_num
) {
1380 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1384 while ((brp_list
[brp_2
].used
||
1385 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
1388 printf("brp(IVA) found num: %d\n", brp_2
);
1389 if (brp_2
>= aarch64
->brp_num
) {
1390 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1394 breakpoint
->set
= brp_1
+ 1;
1395 breakpoint
->linked_BRP
= brp_2
;
1396 control_CTX
= ((CTX_machmode
& 0x7) << 20)
1399 | (CTX_byte_addr_select
<< 5)
1401 brp_list
[brp_1
].used
= 1;
1402 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1403 brp_list
[brp_1
].control
= control_CTX
;
1404 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1405 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1406 brp_list
[brp_1
].value
);
1407 if (retval
!= ERROR_OK
)
1409 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1410 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1411 brp_list
[brp_1
].control
);
1412 if (retval
!= ERROR_OK
)
1415 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1418 | (IVA_byte_addr_select
<< 5)
1420 brp_list
[brp_2
].used
= 1;
1421 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1422 brp_list
[brp_2
].control
= control_IVA
;
1423 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1424 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1425 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1426 if (retval
!= ERROR_OK
)
1428 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1429 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].BRPn
,
1430 brp_list
[brp_2
].value
>> 32);
1431 if (retval
!= ERROR_OK
)
1433 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1434 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1435 brp_list
[brp_2
].control
);
1436 if (retval
!= ERROR_OK
)
1442 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1445 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1446 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1447 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1449 if (!breakpoint
->set
) {
1450 LOG_WARNING("breakpoint not set");
1454 if (breakpoint
->type
== BKPT_HARD
) {
1455 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1456 int brp_i
= breakpoint
->set
- 1;
1457 int brp_j
= breakpoint
->linked_BRP
;
1458 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1459 LOG_DEBUG("Invalid BRP number in breakpoint");
1462 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1463 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1464 brp_list
[brp_i
].used
= 0;
1465 brp_list
[brp_i
].value
= 0;
1466 brp_list
[brp_i
].control
= 0;
1467 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1468 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1469 brp_list
[brp_i
].control
);
1470 if (retval
!= ERROR_OK
)
1472 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1473 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1474 (uint32_t)brp_list
[brp_i
].value
);
1475 if (retval
!= ERROR_OK
)
1477 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1478 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1479 (uint32_t)brp_list
[brp_i
].value
);
1480 if (retval
!= ERROR_OK
)
1482 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1483 LOG_DEBUG("Invalid BRP number in breakpoint");
1486 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1487 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1488 brp_list
[brp_j
].used
= 0;
1489 brp_list
[brp_j
].value
= 0;
1490 brp_list
[brp_j
].control
= 0;
1491 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1492 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1493 brp_list
[brp_j
].control
);
1494 if (retval
!= ERROR_OK
)
1496 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1497 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1498 (uint32_t)brp_list
[brp_j
].value
);
1499 if (retval
!= ERROR_OK
)
1501 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1502 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_j
].BRPn
,
1503 (uint32_t)brp_list
[brp_j
].value
);
1504 if (retval
!= ERROR_OK
)
1507 breakpoint
->linked_BRP
= 0;
1508 breakpoint
->set
= 0;
1512 int brp_i
= breakpoint
->set
- 1;
1513 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1514 LOG_DEBUG("Invalid BRP number in breakpoint");
1517 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1518 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1519 brp_list
[brp_i
].used
= 0;
1520 brp_list
[brp_i
].value
= 0;
1521 brp_list
[brp_i
].control
= 0;
1522 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1523 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1524 brp_list
[brp_i
].control
);
1525 if (retval
!= ERROR_OK
)
1527 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1528 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1529 brp_list
[brp_i
].value
);
1530 if (retval
!= ERROR_OK
)
1533 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1534 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1535 (uint32_t)brp_list
[brp_i
].value
);
1536 if (retval
!= ERROR_OK
)
1538 breakpoint
->set
= 0;
1542 /* restore original instruction (kept in target endianness) */
1544 armv8_cache_d_inner_flush_virt(armv8
,
1545 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1546 breakpoint
->length
);
1548 if (breakpoint
->length
== 4) {
1549 retval
= target_write_memory(target
,
1550 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1551 4, 1, breakpoint
->orig_instr
);
1552 if (retval
!= ERROR_OK
)
1555 retval
= target_write_memory(target
,
1556 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1557 2, 1, breakpoint
->orig_instr
);
1558 if (retval
!= ERROR_OK
)
1562 armv8_cache_d_inner_flush_virt(armv8
,
1563 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1564 breakpoint
->length
);
1566 armv8_cache_i_inner_inval_virt(armv8
,
1567 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1568 breakpoint
->length
);
1570 breakpoint
->set
= 0;
1575 static int aarch64_add_breakpoint(struct target
*target
,
1576 struct breakpoint
*breakpoint
)
1578 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1580 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1581 LOG_INFO("no hardware breakpoint available");
1582 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1585 if (breakpoint
->type
== BKPT_HARD
)
1586 aarch64
->brp_num_available
--;
1588 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1591 static int aarch64_add_context_breakpoint(struct target
*target
,
1592 struct breakpoint
*breakpoint
)
1594 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1596 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1597 LOG_INFO("no hardware breakpoint available");
1598 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1601 if (breakpoint
->type
== BKPT_HARD
)
1602 aarch64
->brp_num_available
--;
1604 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1607 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1608 struct breakpoint
*breakpoint
)
1610 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1612 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1613 LOG_INFO("no hardware breakpoint available");
1614 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1617 if (breakpoint
->type
== BKPT_HARD
)
1618 aarch64
->brp_num_available
--;
1620 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1624 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1626 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1629 /* It is perfectly possible to remove breakpoints while the target is running */
1630 if (target
->state
!= TARGET_HALTED
) {
1631 LOG_WARNING("target not halted");
1632 return ERROR_TARGET_NOT_HALTED
;
1636 if (breakpoint
->set
) {
1637 aarch64_unset_breakpoint(target
, breakpoint
);
1638 if (breakpoint
->type
== BKPT_HARD
)
1639 aarch64
->brp_num_available
++;
1646 * Cortex-A8 Reset functions
1649 static int aarch64_assert_reset(struct target
*target
)
1651 struct armv8_common
*armv8
= target_to_armv8(target
);
1655 /* FIXME when halt is requested, make it work somehow... */
1657 /* Issue some kind of warm reset. */
1658 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1659 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1660 else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1661 /* REVISIT handle "pulls" cases, if there's
1662 * hardware that needs them to work.
1664 jtag_add_reset(0, 1);
1666 LOG_ERROR("%s: how to reset?", target_name(target
));
1670 /* registers are now invalid */
1671 if (target_was_examined(target
)) {
1672 register_cache_invalidate(armv8
->arm
.core_cache
);
1673 register_cache_invalidate(armv8
->arm
.core_cache
->next
);
1676 target
->state
= TARGET_RESET
;
1681 static int aarch64_deassert_reset(struct target
*target
)
1687 /* be certain SRST is off */
1688 jtag_add_reset(0, 0);
1690 if (!target_was_examined(target
))
1693 retval
= aarch64_poll(target
);
1694 if (retval
!= ERROR_OK
)
1697 retval
= aarch64_init_debug_access(target
);
1698 if (retval
!= ERROR_OK
)
1701 if (target
->reset_halt
) {
1702 if (target
->state
!= TARGET_HALTED
) {
1703 LOG_WARNING("%s: ran after reset and before halt ...",
1704 target_name(target
));
1705 retval
= target_halt(target
);
1712 static int aarch64_write_cpu_memory_slow(struct target
*target
,
1713 uint32_t size
, uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
1715 struct armv8_common
*armv8
= target_to_armv8(target
);
1716 struct arm_dpm
*dpm
= &armv8
->dpm
;
1717 struct arm
*arm
= &armv8
->arm
;
1720 armv8_reg_current(arm
, 1)->dirty
= true;
1722 /* change DCC to normal mode if necessary */
1723 if (*dscr
& DSCR_MA
) {
1725 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1726 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1727 if (retval
!= ERROR_OK
)
1732 uint32_t data
, opcode
;
1734 /* write the data to store into DTRRX */
1738 data
= target_buffer_get_u16(target
, buffer
);
1740 data
= target_buffer_get_u32(target
, buffer
);
1741 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1742 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
1743 if (retval
!= ERROR_OK
)
1746 if (arm
->core_state
== ARM_STATE_AARCH64
)
1747 retval
= dpm
->instr_execute(dpm
, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0
, 1));
1749 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
1750 if (retval
!= ERROR_OK
)
1754 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRB_IP
);
1756 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRH_IP
);
1758 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRW_IP
);
1759 retval
= dpm
->instr_execute(dpm
, opcode
);
1760 if (retval
!= ERROR_OK
)
1771 static int aarch64_write_cpu_memory_fast(struct target
*target
,
1772 uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
1774 struct armv8_common
*armv8
= target_to_armv8(target
);
1775 struct arm
*arm
= &armv8
->arm
;
1778 armv8_reg_current(arm
, 1)->dirty
= true;
1780 /* Step 1.d - Change DCC to memory mode */
1782 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1783 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1784 if (retval
!= ERROR_OK
)
1788 /* Step 2.a - Do the write */
1789 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
1790 buffer
, 4, count
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
1791 if (retval
!= ERROR_OK
)
1794 /* Step 3.a - Switch DTR mode back to Normal mode */
1796 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1797 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1798 if (retval
!= ERROR_OK
)
1804 static int aarch64_write_cpu_memory(struct target
*target
,
1805 uint64_t address
, uint32_t size
,
1806 uint32_t count
, const uint8_t *buffer
)
1808 /* write memory through APB-AP */
1809 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1810 struct armv8_common
*armv8
= target_to_armv8(target
);
1811 struct arm_dpm
*dpm
= &armv8
->dpm
;
1812 struct arm
*arm
= &armv8
->arm
;
1815 if (target
->state
!= TARGET_HALTED
) {
1816 LOG_WARNING("target not halted");
1817 return ERROR_TARGET_NOT_HALTED
;
1820 /* Mark register X0 as dirty, as it will be used
1821 * for transferring the data.
1822 * It will be restored automatically when exiting
1825 armv8_reg_current(arm
, 0)->dirty
= true;
1827 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1830 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1831 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1832 if (retval
!= ERROR_OK
)
1835 /* Set Normal access mode */
1836 dscr
= (dscr
& ~DSCR_MA
);
1837 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1838 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1839 if (retval
!= ERROR_OK
)
1842 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1843 /* Write X0 with value 'address' using write procedure */
1844 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1845 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1846 retval
= dpm
->instr_write_data_dcc_64(dpm
,
1847 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
1849 /* Write R0 with value 'address' using write procedure */
1850 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1851 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1852 retval
= dpm
->instr_write_data_dcc(dpm
,
1853 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
1856 if (retval
!= ERROR_OK
)
1859 if (size
== 4 && (address
% 4) == 0)
1860 retval
= aarch64_write_cpu_memory_fast(target
, count
, buffer
, &dscr
);
1862 retval
= aarch64_write_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
1864 if (retval
!= ERROR_OK
) {
1865 /* Unset DTR mode */
1866 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1867 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1869 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1870 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1873 /* Check for sticky abort flags in the DSCR */
1874 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1875 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1876 if (retval
!= ERROR_OK
)
1880 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1881 /* Abort occurred - clear it and exit */
1882 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1883 armv8_dpm_handle_exception(dpm
, true);
1891 static int aarch64_read_cpu_memory_slow(struct target
*target
,
1892 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
1894 struct armv8_common
*armv8
= target_to_armv8(target
);
1895 struct arm_dpm
*dpm
= &armv8
->dpm
;
1896 struct arm
*arm
= &armv8
->arm
;
1899 armv8_reg_current(arm
, 1)->dirty
= true;
1901 /* change DCC to normal mode (if necessary) */
1902 if (*dscr
& DSCR_MA
) {
1904 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1905 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1906 if (retval
!= ERROR_OK
)
1911 uint32_t opcode
, data
;
1914 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRB_IP
);
1916 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRH_IP
);
1918 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRW_IP
);
1919 retval
= dpm
->instr_execute(dpm
, opcode
);
1920 if (retval
!= ERROR_OK
)
1923 if (arm
->core_state
== ARM_STATE_AARCH64
)
1924 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0
, 1));
1926 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1927 if (retval
!= ERROR_OK
)
1930 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1931 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &data
);
1932 if (retval
!= ERROR_OK
)
1936 *buffer
= (uint8_t)data
;
1938 target_buffer_set_u16(target
, buffer
, (uint16_t)data
);
1940 target_buffer_set_u32(target
, buffer
, data
);
1950 static int aarch64_read_cpu_memory_fast(struct target
*target
,
1951 uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
1953 struct armv8_common
*armv8
= target_to_armv8(target
);
1954 struct arm_dpm
*dpm
= &armv8
->dpm
;
1955 struct arm
*arm
= &armv8
->arm
;
1959 /* Mark X1 as dirty */
1960 armv8_reg_current(arm
, 1)->dirty
= true;
1962 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1963 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1964 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0));
1966 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1967 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1970 if (retval
!= ERROR_OK
)
1973 /* Step 1.e - Change DCC to memory mode */
1975 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1976 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1977 if (retval
!= ERROR_OK
)
1980 /* Step 1.f - read DBGDTRTX and discard the value */
1981 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1982 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1983 if (retval
!= ERROR_OK
)
1987 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1988 * Abort flags are sticky, so can be read at end of transactions
1990 * This data is read in aligned to 32 bit boundary.
1994 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1995 * increments X0 by 4. */
1996 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, buffer
, 4, count
,
1997 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
1998 if (retval
!= ERROR_OK
)
2002 /* Step 3.a - set DTR access mode back to Normal mode */
2004 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2005 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2006 if (retval
!= ERROR_OK
)
2009 /* Step 3.b - read DBGDTRTX for the final value */
2010 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2011 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2012 if (retval
!= ERROR_OK
)
2015 target_buffer_set_u32(target
, buffer
+ count
* 4, value
);
2019 static int aarch64_read_cpu_memory(struct target
*target
,
2020 target_addr_t address
, uint32_t size
,
2021 uint32_t count
, uint8_t *buffer
)
2023 /* read memory through APB-AP */
2024 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2025 struct armv8_common
*armv8
= target_to_armv8(target
);
2026 struct arm_dpm
*dpm
= &armv8
->dpm
;
2027 struct arm
*arm
= &armv8
->arm
;
2030 LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64
" size %" PRIu32
" count %" PRIu32
,
2031 address
, size
, count
);
2033 if (target
->state
!= TARGET_HALTED
) {
2034 LOG_WARNING("target not halted");
2035 return ERROR_TARGET_NOT_HALTED
;
2038 /* Mark register X0 as dirty, as it will be used
2039 * for transferring the data.
2040 * It will be restored automatically when exiting
2043 armv8_reg_current(arm
, 0)->dirty
= true;
2046 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2047 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2048 if (retval
!= ERROR_OK
)
2051 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2053 /* Set Normal access mode */
2055 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2056 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2057 if (retval
!= ERROR_OK
)
2060 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2061 /* Write X0 with value 'address' using write procedure */
2062 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2063 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2064 retval
= dpm
->instr_write_data_dcc_64(dpm
,
2065 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
2067 /* Write R0 with value 'address' using write procedure */
2068 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2069 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2070 retval
= dpm
->instr_write_data_dcc(dpm
,
2071 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
2074 if (retval
!= ERROR_OK
)
2077 if (size
== 4 && (address
% 4) == 0)
2078 retval
= aarch64_read_cpu_memory_fast(target
, count
, buffer
, &dscr
);
2080 retval
= aarch64_read_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
2082 if (dscr
& DSCR_MA
) {
2084 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2085 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2088 if (retval
!= ERROR_OK
)
2091 /* Check for sticky abort flags in the DSCR */
2092 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2093 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2094 if (retval
!= ERROR_OK
)
2099 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
2100 /* Abort occurred - clear it and exit */
2101 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
2102 armv8_dpm_handle_exception(dpm
, true);
2110 static int aarch64_read_phys_memory(struct target
*target
,
2111 target_addr_t address
, uint32_t size
,
2112 uint32_t count
, uint8_t *buffer
)
2114 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2116 if (count
&& buffer
) {
2117 /* read memory through APB-AP */
2118 retval
= aarch64_mmu_modify(target
, 0);
2119 if (retval
!= ERROR_OK
)
2121 retval
= aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2126 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
2127 uint32_t size
, uint32_t count
, uint8_t *buffer
)
2129 int mmu_enabled
= 0;
2132 /* determine if MMU was enabled on target stop */
2133 retval
= aarch64_mmu(target
, &mmu_enabled
);
2134 if (retval
!= ERROR_OK
)
2138 /* enable MMU as we could have disabled it for phys access */
2139 retval
= aarch64_mmu_modify(target
, 1);
2140 if (retval
!= ERROR_OK
)
2143 return aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2146 static int aarch64_write_phys_memory(struct target
*target
,
2147 target_addr_t address
, uint32_t size
,
2148 uint32_t count
, const uint8_t *buffer
)
2150 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2152 if (count
&& buffer
) {
2153 /* write memory through APB-AP */
2154 retval
= aarch64_mmu_modify(target
, 0);
2155 if (retval
!= ERROR_OK
)
2157 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2163 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
2164 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
2166 int mmu_enabled
= 0;
2169 /* determine if MMU was enabled on target stop */
2170 retval
= aarch64_mmu(target
, &mmu_enabled
);
2171 if (retval
!= ERROR_OK
)
2175 /* enable MMU as we could have disabled it for phys access */
2176 retval
= aarch64_mmu_modify(target
, 1);
2177 if (retval
!= ERROR_OK
)
2180 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2183 static int aarch64_handle_target_request(void *priv
)
2185 struct target
*target
= priv
;
2186 struct armv8_common
*armv8
= target_to_armv8(target
);
2189 if (!target_was_examined(target
))
2191 if (!target
->dbg_msg_enabled
)
2194 if (target
->state
== TARGET_RUNNING
) {
2197 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2198 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2200 /* check if we have data */
2201 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
2202 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2203 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
2204 if (retval
== ERROR_OK
) {
2205 target_request(target
, request
);
2206 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2207 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2215 static int aarch64_examine_first(struct target
*target
)
2217 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2218 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2219 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2220 struct aarch64_private_config
*pc
;
2222 int retval
= ERROR_OK
;
2223 uint64_t debug
, ttypr
;
2225 uint32_t tmp0
, tmp1
, tmp2
, tmp3
;
2226 debug
= ttypr
= cpuid
= 0;
2228 /* Search for the APB-AB - it is needed for access to debug registers */
2229 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
2230 if (retval
!= ERROR_OK
) {
2231 LOG_ERROR("Could not find APB-AP for debug access");
2235 retval
= mem_ap_init(armv8
->debug_ap
);
2236 if (retval
!= ERROR_OK
) {
2237 LOG_ERROR("Could not initialize the APB-AP");
2241 armv8
->debug_ap
->memaccess_tck
= 10;
2243 if (!target
->dbgbase_set
) {
2245 /* Get ROM Table base */
2247 int32_t coreidx
= target
->coreid
;
2248 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
2249 if (retval
!= ERROR_OK
)
2251 /* Lookup 0x15 -- Processor DAP */
2252 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
2253 &armv8
->debug_base
, &coreidx
);
2254 if (retval
!= ERROR_OK
)
2256 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
2257 " apid: %08" PRIx32
, coreidx
, armv8
->debug_base
, apid
);
2259 armv8
->debug_base
= target
->dbgbase
;
2261 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2262 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
2263 if (retval
!= ERROR_OK
) {
2264 LOG_DEBUG("Examine %s failed", "oslock");
2268 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2269 armv8
->debug_base
+ CPUV8_DBG_MAINID0
, &cpuid
);
2270 if (retval
!= ERROR_OK
) {
2271 LOG_DEBUG("Examine %s failed", "CPUID");
2275 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2276 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
, &tmp0
);
2277 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2278 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
+ 4, &tmp1
);
2279 if (retval
!= ERROR_OK
) {
2280 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2283 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2284 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
, &tmp2
);
2285 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2286 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
+ 4, &tmp3
);
2287 if (retval
!= ERROR_OK
) {
2288 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2292 retval
= dap_run(armv8
->debug_ap
->dap
);
2293 if (retval
!= ERROR_OK
) {
2294 LOG_ERROR("%s: examination failed\n", target_name(target
));
2299 ttypr
= (ttypr
<< 32) | tmp0
;
2301 debug
= (debug
<< 32) | tmp2
;
2303 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
2304 LOG_DEBUG("ttypr = 0x%08" PRIx64
, ttypr
);
2305 LOG_DEBUG("debug = 0x%08" PRIx64
, debug
);
2307 if (target
->private_config
== NULL
)
2310 pc
= (struct aarch64_private_config
*)target
->private_config
;
2311 if (pc
->cti
== NULL
)
2314 armv8
->cti
= pc
->cti
;
2316 retval
= aarch64_dpm_setup(aarch64
, debug
);
2317 if (retval
!= ERROR_OK
)
2320 /* Setup Breakpoint Register Pairs */
2321 aarch64
->brp_num
= (uint32_t)((debug
>> 12) & 0x0F) + 1;
2322 aarch64
->brp_num_context
= (uint32_t)((debug
>> 28) & 0x0F) + 1;
2323 aarch64
->brp_num_available
= aarch64
->brp_num
;
2324 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
2325 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
2326 aarch64
->brp_list
[i
].used
= 0;
2327 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
2328 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
2330 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
2331 aarch64
->brp_list
[i
].value
= 0;
2332 aarch64
->brp_list
[i
].control
= 0;
2333 aarch64
->brp_list
[i
].BRPn
= i
;
2336 LOG_DEBUG("Configured %i hw breakpoints", aarch64
->brp_num
);
2338 target
->state
= TARGET_UNKNOWN
;
2339 target
->debug_reason
= DBG_REASON_NOTHALTED
;
2340 aarch64
->isrmasking_mode
= AARCH64_ISRMASK_ON
;
2341 target_set_examined(target
);
2345 static int aarch64_examine(struct target
*target
)
2347 int retval
= ERROR_OK
;
2349 /* don't re-probe hardware after each reset */
2350 if (!target_was_examined(target
))
2351 retval
= aarch64_examine_first(target
);
2353 /* Configure core debug access */
2354 if (retval
== ERROR_OK
)
2355 retval
= aarch64_init_debug_access(target
);
2361 * Cortex-A8 target creation and initialization
2364 static int aarch64_init_target(struct command_context
*cmd_ctx
,
2365 struct target
*target
)
2367 /* examine_first() does a bunch of this */
2368 arm_semihosting_init(target
);
2372 static int aarch64_init_arch_info(struct target
*target
,
2373 struct aarch64_common
*aarch64
, struct adiv5_dap
*dap
)
2375 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2377 /* Setup struct aarch64_common */
2378 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
2379 armv8
->arm
.dap
= dap
;
2381 /* register arch-specific functions */
2382 armv8
->examine_debug_reason
= NULL
;
2383 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
2384 armv8
->pre_restore_context
= NULL
;
2385 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
2387 armv8_init_arch_info(target
, armv8
);
2388 target_register_timer_callback(aarch64_handle_target_request
, 1, 1, target
);
2393 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
2395 struct aarch64_private_config
*pc
= target
->private_config
;
2396 struct aarch64_common
*aarch64
= calloc(1, sizeof(struct aarch64_common
));
2398 if (adiv5_verify_config(&pc
->adiv5_config
) != ERROR_OK
)
2401 return aarch64_init_arch_info(target
, aarch64
, pc
->adiv5_config
.dap
);
2404 static void aarch64_deinit_target(struct target
*target
)
2406 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2407 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2408 struct arm_dpm
*dpm
= &armv8
->dpm
;
2410 armv8_free_reg_cache(target
);
2411 free(aarch64
->brp_list
);
2414 free(target
->private_config
);
2418 static int aarch64_mmu(struct target
*target
, int *enabled
)
2420 if (target
->state
!= TARGET_HALTED
) {
2421 LOG_ERROR("%s: target %s not halted", __func__
, target_name(target
));
2422 return ERROR_TARGET_INVALID
;
2425 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
2429 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
2430 target_addr_t
*phys
)
2432 return armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
2436 * private target configuration items
2438 enum aarch64_cfg_param
{
2442 static const Jim_Nvp nvp_config_opts
[] = {
2443 { .name
= "-cti", .value
= CFG_CTI
},
2444 { .name
= NULL
, .value
= -1 }
2447 static int aarch64_jim_configure(struct target
*target
, Jim_GetOptInfo
*goi
)
2449 struct aarch64_private_config
*pc
;
2453 pc
= (struct aarch64_private_config
*)target
->private_config
;
2455 pc
= calloc(1, sizeof(struct aarch64_private_config
));
2456 target
->private_config
= pc
;
2460 * Call adiv5_jim_configure() to parse the common DAP options
2461 * It will return JIM_CONTINUE if it didn't find any known
2462 * options, JIM_OK if it correctly parsed the topmost option
2463 * and JIM_ERR if an error occured during parameter evaluation.
2464 * For JIM_CONTINUE, we check our own params.
2466 e
= adiv5_jim_configure(target
, goi
);
2467 if (e
!= JIM_CONTINUE
)
2470 /* parse config or cget options ... */
2471 if (goi
->argc
> 0) {
2472 Jim_SetEmptyResult(goi
->interp
);
2474 /* check first if topmost item is for us */
2475 e
= Jim_Nvp_name2value_obj(goi
->interp
, nvp_config_opts
,
2478 return JIM_CONTINUE
;
2480 e
= Jim_GetOpt_Obj(goi
, NULL
);
2486 if (goi
->isconfigure
) {
2488 struct arm_cti
*cti
;
2489 e
= Jim_GetOpt_Obj(goi
, &o_cti
);
2492 cti
= cti_instance_by_jim_obj(goi
->interp
, o_cti
);
2494 Jim_SetResultString(goi
->interp
, "CTI name invalid!", -1);
2499 if (goi
->argc
!= 0) {
2500 Jim_WrongNumArgs(goi
->interp
,
2501 goi
->argc
, goi
->argv
,
2506 if (pc
== NULL
|| pc
->cti
== NULL
) {
2507 Jim_SetResultString(goi
->interp
, "CTI not configured", -1);
2510 Jim_SetResultString(goi
->interp
, arm_cti_name(pc
->cti
), -1);
2516 return JIM_CONTINUE
;
2523 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
2525 struct target
*target
= get_current_target(CMD_CTX
);
2526 struct armv8_common
*armv8
= target_to_armv8(target
);
2528 return armv8_handle_cache_info_command(CMD_CTX
,
2529 &armv8
->armv8_mmu
.armv8_cache
);
2533 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
2535 struct target
*target
= get_current_target(CMD_CTX
);
2536 if (!target_was_examined(target
)) {
2537 LOG_ERROR("target not examined yet");
2541 return aarch64_init_debug_access(target
);
2543 COMMAND_HANDLER(aarch64_handle_smp_off_command
)
2545 struct target
*target
= get_current_target(CMD_CTX
);
2546 /* check target is an smp target */
2547 struct target_list
*head
;
2548 struct target
*curr
;
2549 head
= target
->head
;
2551 if (head
!= (struct target_list
*)NULL
) {
2552 while (head
!= (struct target_list
*)NULL
) {
2553 curr
= head
->target
;
2557 /* fixes the target display to the debugger */
2558 target
->gdb_service
->target
= target
;
2563 COMMAND_HANDLER(aarch64_handle_smp_on_command
)
2565 struct target
*target
= get_current_target(CMD_CTX
);
2566 struct target_list
*head
;
2567 struct target
*curr
;
2568 head
= target
->head
;
2569 if (head
!= (struct target_list
*)NULL
) {
2571 while (head
!= (struct target_list
*)NULL
) {
2572 curr
= head
->target
;
2580 COMMAND_HANDLER(aarch64_mask_interrupts_command
)
2582 struct target
*target
= get_current_target(CMD_CTX
);
2583 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2585 static const Jim_Nvp nvp_maskisr_modes
[] = {
2586 { .name
= "off", .value
= AARCH64_ISRMASK_OFF
},
2587 { .name
= "on", .value
= AARCH64_ISRMASK_ON
},
2588 { .name
= NULL
, .value
= -1 },
2593 n
= Jim_Nvp_name2value_simple(nvp_maskisr_modes
, CMD_ARGV
[0]);
2594 if (n
->name
== NULL
) {
2595 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV
[0]);
2596 return ERROR_COMMAND_SYNTAX_ERROR
;
2599 aarch64
->isrmasking_mode
= n
->value
;
2602 n
= Jim_Nvp_value2name_simple(nvp_maskisr_modes
, aarch64
->isrmasking_mode
);
2603 command_print(CMD_CTX
, "aarch64 interrupt mask %s", n
->name
);
2608 static int jim_mcrmrc(Jim_Interp
*interp
, int argc
, Jim_Obj
* const *argv
)
2610 struct command_context
*context
;
2611 struct target
*target
;
2614 bool is_mcr
= false;
2617 if (Jim_CompareStringImmediate(interp
, argv
[0], "mcr")) {
2624 context
= current_command_context(interp
);
2625 assert(context
!= NULL
);
2627 target
= get_current_target(context
);
2628 if (target
== NULL
) {
2629 LOG_ERROR("%s: no current target", __func__
);
2632 if (!target_was_examined(target
)) {
2633 LOG_ERROR("%s: not yet examined", target_name(target
));
2637 arm
= target_to_arm(target
);
2639 LOG_ERROR("%s: not an ARM", target_name(target
));
2643 if (target
->state
!= TARGET_HALTED
)
2644 return ERROR_TARGET_NOT_HALTED
;
2646 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2647 LOG_ERROR("%s: not 32-bit arm target", target_name(target
));
2651 if (argc
!= arg_cnt
) {
2652 LOG_ERROR("%s: wrong number of arguments", __func__
);
2664 /* NOTE: parameter sequence matches ARM instruction set usage:
2665 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
2666 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
2667 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
2669 retval
= Jim_GetLong(interp
, argv
[1], &l
);
2670 if (retval
!= JIM_OK
)
2673 LOG_ERROR("%s: %s %d out of range", __func__
,
2674 "coprocessor", (int) l
);
2679 retval
= Jim_GetLong(interp
, argv
[2], &l
);
2680 if (retval
!= JIM_OK
)
2683 LOG_ERROR("%s: %s %d out of range", __func__
,
2689 retval
= Jim_GetLong(interp
, argv
[3], &l
);
2690 if (retval
!= JIM_OK
)
2693 LOG_ERROR("%s: %s %d out of range", __func__
,
2699 retval
= Jim_GetLong(interp
, argv
[4], &l
);
2700 if (retval
!= JIM_OK
)
2703 LOG_ERROR("%s: %s %d out of range", __func__
,
2709 retval
= Jim_GetLong(interp
, argv
[5], &l
);
2710 if (retval
!= JIM_OK
)
2713 LOG_ERROR("%s: %s %d out of range", __func__
,
2721 if (is_mcr
== true) {
2722 retval
= Jim_GetLong(interp
, argv
[6], &l
);
2723 if (retval
!= JIM_OK
)
2727 /* NOTE: parameters reordered! */
2728 /* ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2) */
2729 retval
= arm
->mcr(target
, cpnum
, op1
, op2
, CRn
, CRm
, value
);
2730 if (retval
!= ERROR_OK
)
2733 /* NOTE: parameters reordered! */
2734 /* ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2) */
2735 retval
= arm
->mrc(target
, cpnum
, op1
, op2
, CRn
, CRm
, &value
);
2736 if (retval
!= ERROR_OK
)
2739 Jim_SetResult(interp
, Jim_NewIntObj(interp
, value
));
2745 static const struct command_registration aarch64_exec_command_handlers
[] = {
2747 .name
= "cache_info",
2748 .handler
= aarch64_handle_cache_info_command
,
2749 .mode
= COMMAND_EXEC
,
2750 .help
= "display information about target caches",
2755 .handler
= aarch64_handle_dbginit_command
,
2756 .mode
= COMMAND_EXEC
,
2757 .help
= "Initialize core debug",
2760 { .name
= "smp_off",
2761 .handler
= aarch64_handle_smp_off_command
,
2762 .mode
= COMMAND_EXEC
,
2763 .help
= "Stop smp handling",
2768 .handler
= aarch64_handle_smp_on_command
,
2769 .mode
= COMMAND_EXEC
,
2770 .help
= "Restart smp handling",
2775 .handler
= aarch64_mask_interrupts_command
,
2776 .mode
= COMMAND_ANY
,
2777 .help
= "mask aarch64 interrupts during single-step",
2778 .usage
= "['on'|'off']",
2782 .mode
= COMMAND_EXEC
,
2783 .jim_handler
= jim_mcrmrc
,
2784 .help
= "write coprocessor register",
2785 .usage
= "cpnum op1 CRn CRm op2 value",
2789 .mode
= COMMAND_EXEC
,
2790 .jim_handler
= jim_mcrmrc
,
2791 .help
= "read coprocessor register",
2792 .usage
= "cpnum op1 CRn CRm op2",
2796 COMMAND_REGISTRATION_DONE
2799 static const struct command_registration aarch64_command_handlers
[] = {
2801 .chain
= armv8_command_handlers
,
2805 .mode
= COMMAND_ANY
,
2806 .help
= "Aarch64 command group",
2808 .chain
= aarch64_exec_command_handlers
,
2810 COMMAND_REGISTRATION_DONE
2813 struct target_type aarch64_target
= {
2816 .poll
= aarch64_poll
,
2817 .arch_state
= armv8_arch_state
,
2819 .halt
= aarch64_halt
,
2820 .resume
= aarch64_resume
,
2821 .step
= aarch64_step
,
2823 .assert_reset
= aarch64_assert_reset
,
2824 .deassert_reset
= aarch64_deassert_reset
,
2826 /* REVISIT allow exporting VFP3 registers ... */
2827 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
2829 .read_memory
= aarch64_read_memory
,
2830 .write_memory
= aarch64_write_memory
,
2832 .add_breakpoint
= aarch64_add_breakpoint
,
2833 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
2834 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
2835 .remove_breakpoint
= aarch64_remove_breakpoint
,
2836 .add_watchpoint
= NULL
,
2837 .remove_watchpoint
= NULL
,
2839 .commands
= aarch64_command_handlers
,
2840 .target_create
= aarch64_target_create
,
2841 .target_jim_configure
= aarch64_jim_configure
,
2842 .init_target
= aarch64_init_target
,
2843 .deinit_target
= aarch64_deinit_target
,
2844 .examine
= aarch64_examine
,
2846 .read_phys_memory
= aarch64_read_phys_memory
,
2847 .write_phys_memory
= aarch64_write_phys_memory
,
2849 .virt2phys
= aarch64_virt2phys
,
Linking to existing account procedure
If you already have an account and want to add another login method
you
MUST first sign in with your existing account and
then change URL to read
https://review.openocd.org/login/?link
to get to this page again but this time it'll work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)