1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
43 struct aarch64_private_config
{
47 static int aarch64_poll(struct target
*target
);
48 static int aarch64_debug_entry(struct target
*target
);
49 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
50 static int aarch64_set_breakpoint(struct target
*target
,
51 struct breakpoint
*breakpoint
, uint8_t matchmode
);
52 static int aarch64_set_context_breakpoint(struct target
*target
,
53 struct breakpoint
*breakpoint
, uint8_t matchmode
);
54 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
55 struct breakpoint
*breakpoint
);
56 static int aarch64_unset_breakpoint(struct target
*target
,
57 struct breakpoint
*breakpoint
);
58 static int aarch64_mmu(struct target
*target
, int *enabled
);
59 static int aarch64_virt2phys(struct target
*target
,
60 target_addr_t virt
, target_addr_t
*phys
);
61 static int aarch64_read_cpu_memory(struct target
*target
,
62 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
/* Iterate `pos` over a NULL-terminated singly linked list starting at `head`.
 * Used to walk all targets in an SMP group. */
#define foreach_smp_target(pos, head) \
	for (pos = head; (pos != NULL); pos = pos->next)
67 static int aarch64_restore_system_control_reg(struct target
*target
)
69 enum arm_mode target_mode
= ARM_MODE_ANY
;
70 int retval
= ERROR_OK
;
73 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
74 struct armv8_common
*armv8
= target_to_armv8(target
);
76 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
77 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
78 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
80 switch (armv8
->arm
.core_mode
) {
82 target_mode
= ARMV8_64_EL1H
;
86 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
90 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
94 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
101 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
105 LOG_INFO("cannot read system control register in this mode");
109 if (target_mode
!= ARM_MODE_ANY
)
110 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
112 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
, aarch64
->system_control_reg
);
113 if (retval
!= ERROR_OK
)
116 if (target_mode
!= ARM_MODE_ANY
)
117 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
123 /* modify system_control_reg in order to enable or disable mmu for :
124 * - virt2phys address conversion
125 * - read or write memory in phys or virt address */
126 static int aarch64_mmu_modify(struct target
*target
, int enable
)
128 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
129 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
130 int retval
= ERROR_OK
;
134 /* if mmu enabled at target stop and mmu not enable */
135 if (!(aarch64
->system_control_reg
& 0x1U
)) {
136 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
139 if (!(aarch64
->system_control_reg_curr
& 0x1U
))
140 aarch64
->system_control_reg_curr
|= 0x1U
;
142 if (aarch64
->system_control_reg_curr
& 0x4U
) {
143 /* data cache is active */
144 aarch64
->system_control_reg_curr
&= ~0x4U
;
145 /* flush data cache armv8 function to be called */
146 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
147 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
149 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
150 aarch64
->system_control_reg_curr
&= ~0x1U
;
154 switch (armv8
->arm
.core_mode
) {
158 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
162 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
166 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
173 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
177 LOG_DEBUG("unknown cpu state 0x%" PRIx32
, armv8
->arm
.core_mode
);
181 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
,
182 aarch64
->system_control_reg_curr
);
187 * Basic debug access, very low level assumes state is saved
189 static int aarch64_init_debug_access(struct target
*target
)
191 struct armv8_common
*armv8
= target_to_armv8(target
);
195 LOG_DEBUG("%s", target_name(target
));
197 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
198 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
199 if (retval
!= ERROR_OK
) {
200 LOG_DEBUG("Examine %s failed", "oslock");
204 /* Clear Sticky Power Down status Bit in PRSR to enable access to
205 the registers in the Core Power Domain */
206 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
207 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
208 if (retval
!= ERROR_OK
)
212 * Static CTI configuration:
213 * Channel 0 -> trigger outputs HALT request to PE
214 * Channel 1 -> trigger outputs Resume request to PE
215 * Gate all channel trigger events from entering the CTM
219 retval
= arm_cti_enable(armv8
->cti
, true);
220 /* By default, gate all channel events to and from the CTM */
221 if (retval
== ERROR_OK
)
222 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
223 /* output halt requests to PE on channel 0 event */
224 if (retval
== ERROR_OK
)
225 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN0
, CTI_CHNL(0));
226 /* output restart requests to PE on channel 1 event */
227 if (retval
== ERROR_OK
)
228 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN1
, CTI_CHNL(1));
229 if (retval
!= ERROR_OK
)
232 /* Resync breakpoint registers */
237 /* Write to memory mapped registers directly with no cache or mmu handling */
238 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
243 struct armv8_common
*armv8
= target_to_armv8(target
);
245 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
250 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint64_t debug
)
252 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
255 dpm
->arm
= &a8
->armv8_common
.arm
;
258 retval
= armv8_dpm_setup(dpm
);
259 if (retval
== ERROR_OK
)
260 retval
= armv8_dpm_initialize(dpm
);
265 static int aarch64_set_dscr_bits(struct target
*target
, unsigned long bit_mask
, unsigned long value
)
267 struct armv8_common
*armv8
= target_to_armv8(target
);
268 return armv8_set_dbgreg_bits(armv8
, CPUV8_DBG_DSCR
, bit_mask
, value
);
271 static int aarch64_check_state_one(struct target
*target
,
272 uint32_t mask
, uint32_t val
, int *p_result
, uint32_t *p_prsr
)
274 struct armv8_common
*armv8
= target_to_armv8(target
);
278 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
279 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &prsr
);
280 if (retval
!= ERROR_OK
)
287 *p_result
= (prsr
& mask
) == (val
& mask
);
292 static int aarch64_wait_halt_one(struct target
*target
)
294 int retval
= ERROR_OK
;
297 int64_t then
= timeval_ms();
301 retval
= aarch64_check_state_one(target
, PRSR_HALT
, PRSR_HALT
, &halted
, &prsr
);
302 if (retval
!= ERROR_OK
|| halted
)
305 if (timeval_ms() > then
+ 1000) {
306 retval
= ERROR_TARGET_TIMEOUT
;
307 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32
, target_name(target
), prsr
);
314 static int aarch64_prepare_halt_smp(struct target
*target
, bool exc_target
, struct target
**p_first
)
316 int retval
= ERROR_OK
;
317 struct target_list
*head
= target
->head
;
318 struct target
*first
= NULL
;
320 LOG_DEBUG("target %s exc %i", target_name(target
), exc_target
);
322 while (head
!= NULL
) {
323 struct target
*curr
= head
->target
;
324 struct armv8_common
*armv8
= target_to_armv8(curr
);
327 if (exc_target
&& curr
== target
)
329 if (!target_was_examined(curr
))
331 if (curr
->state
!= TARGET_RUNNING
)
334 /* HACK: mark this target as prepared for halting */
335 curr
->debug_reason
= DBG_REASON_DBGRQ
;
337 /* open the gate for channel 0 to let HALT requests pass to the CTM */
338 retval
= arm_cti_ungate_channel(armv8
->cti
, 0);
339 if (retval
== ERROR_OK
)
340 retval
= aarch64_set_dscr_bits(curr
, DSCR_HDE
, DSCR_HDE
);
341 if (retval
!= ERROR_OK
)
344 LOG_DEBUG("target %s prepared", target_name(curr
));
351 if (exc_target
&& first
)
360 static int aarch64_halt_one(struct target
*target
, enum halt_mode mode
)
362 int retval
= ERROR_OK
;
363 struct armv8_common
*armv8
= target_to_armv8(target
);
365 LOG_DEBUG("%s", target_name(target
));
367 /* allow Halting Debug Mode */
368 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
369 if (retval
!= ERROR_OK
)
372 /* trigger an event on channel 0, this outputs a halt request to the PE */
373 retval
= arm_cti_pulse_channel(armv8
->cti
, 0);
374 if (retval
!= ERROR_OK
)
377 if (mode
== HALT_SYNC
) {
378 retval
= aarch64_wait_halt_one(target
);
379 if (retval
!= ERROR_OK
) {
380 if (retval
== ERROR_TARGET_TIMEOUT
)
381 LOG_ERROR("Timeout waiting for target %s halt", target_name(target
));
389 static int aarch64_halt_smp(struct target
*target
, bool exc_target
)
391 struct target
*next
= target
;
394 /* prepare halt on all PEs of the group */
395 retval
= aarch64_prepare_halt_smp(target
, exc_target
, &next
);
397 if (exc_target
&& next
== target
)
400 /* halt the target PE */
401 if (retval
== ERROR_OK
)
402 retval
= aarch64_halt_one(next
, HALT_LAZY
);
404 if (retval
!= ERROR_OK
)
407 /* wait for all PEs to halt */
408 int64_t then
= timeval_ms();
410 bool all_halted
= true;
411 struct target_list
*head
;
414 foreach_smp_target(head
, target
->head
) {
419 if (!target_was_examined(curr
))
422 retval
= aarch64_check_state_one(curr
, PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
423 if (retval
!= ERROR_OK
|| !halted
) {
432 if (timeval_ms() > then
+ 1000) {
433 retval
= ERROR_TARGET_TIMEOUT
;
438 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
439 * and it looks like the CTI's are not connected by a common
440 * trigger matrix. It seems that we need to halt one core in each
441 * cluster explicitly. So if we find that a core has not halted
442 * yet, we trigger an explicit halt for the second cluster.
444 retval
= aarch64_halt_one(curr
, HALT_LAZY
);
445 if (retval
!= ERROR_OK
)
452 static int update_halt_gdb(struct target
*target
, enum target_debug_reason debug_reason
)
454 struct target
*gdb_target
= NULL
;
455 struct target_list
*head
;
458 if (debug_reason
== DBG_REASON_NOTHALTED
) {
459 LOG_DEBUG("Halting remaining targets in SMP group");
460 aarch64_halt_smp(target
, true);
463 /* poll all targets in the group, but skip the target that serves GDB */
464 foreach_smp_target(head
, target
->head
) {
466 /* skip calling context */
469 if (!target_was_examined(curr
))
471 /* skip targets that were already halted */
472 if (curr
->state
== TARGET_HALTED
)
474 /* remember the gdb_service->target */
475 if (curr
->gdb_service
!= NULL
)
476 gdb_target
= curr
->gdb_service
->target
;
478 if (curr
== gdb_target
)
481 /* avoid recursion in aarch64_poll() */
487 /* after all targets were updated, poll the gdb serving target */
488 if (gdb_target
!= NULL
&& gdb_target
!= target
)
489 aarch64_poll(gdb_target
);
495 * Aarch64 Run control
498 static int aarch64_poll(struct target
*target
)
500 enum target_state prev_target_state
;
501 int retval
= ERROR_OK
;
504 retval
= aarch64_check_state_one(target
,
505 PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
506 if (retval
!= ERROR_OK
)
510 prev_target_state
= target
->state
;
511 if (prev_target_state
!= TARGET_HALTED
) {
512 enum target_debug_reason debug_reason
= target
->debug_reason
;
514 /* We have a halting debug event */
515 target
->state
= TARGET_HALTED
;
516 LOG_DEBUG("Target %s halted", target_name(target
));
517 retval
= aarch64_debug_entry(target
);
518 if (retval
!= ERROR_OK
)
522 update_halt_gdb(target
, debug_reason
);
524 switch (prev_target_state
) {
528 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
530 case TARGET_DEBUG_RUNNING
:
531 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_HALTED
);
538 target
->state
= TARGET_RUNNING
;
543 static int aarch64_halt(struct target
*target
)
546 return aarch64_halt_smp(target
, false);
548 return aarch64_halt_one(target
, HALT_SYNC
);
551 static int aarch64_restore_one(struct target
*target
, int current
,
552 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
554 struct armv8_common
*armv8
= target_to_armv8(target
);
555 struct arm
*arm
= &armv8
->arm
;
559 LOG_DEBUG("%s", target_name(target
));
561 if (!debug_execution
)
562 target_free_all_working_areas(target
);
564 /* current = 1: continue on current pc, otherwise continue at <address> */
565 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
567 resume_pc
= *address
;
569 *address
= resume_pc
;
571 /* Make sure that the Armv7 gdb thumb fixups does not
572 * kill the return address
574 switch (arm
->core_state
) {
576 resume_pc
&= 0xFFFFFFFC;
578 case ARM_STATE_AARCH64
:
579 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
581 case ARM_STATE_THUMB
:
582 case ARM_STATE_THUMB_EE
:
583 /* When the return address is loaded into PC
584 * bit 0 must be 1 to stay in Thumb state
588 case ARM_STATE_JAZELLE
:
589 LOG_ERROR("How do I resume into Jazelle state??");
592 LOG_DEBUG("resume pc = 0x%016" PRIx64
, resume_pc
);
593 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
597 /* called it now before restoring context because it uses cpu
598 * register r0 for restoring system control register */
599 retval
= aarch64_restore_system_control_reg(target
);
600 if (retval
== ERROR_OK
)
601 retval
= aarch64_restore_context(target
, handle_breakpoints
);
607 * prepare single target for restart
611 static int aarch64_prepare_restart_one(struct target
*target
)
613 struct armv8_common
*armv8
= target_to_armv8(target
);
618 LOG_DEBUG("%s", target_name(target
));
620 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
621 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
622 if (retval
!= ERROR_OK
)
625 if ((dscr
& DSCR_ITE
) == 0)
626 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
627 if ((dscr
& DSCR_ERR
) != 0)
628 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
630 /* acknowledge a pending CTI halt event */
631 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
633 * open the CTI gate for channel 1 so that the restart events
634 * get passed along to all PEs. Also close gate for channel 0
635 * to isolate the PE from halt events.
637 if (retval
== ERROR_OK
)
638 retval
= arm_cti_ungate_channel(armv8
->cti
, 1);
639 if (retval
== ERROR_OK
)
640 retval
= arm_cti_gate_channel(armv8
->cti
, 0);
642 /* make sure that DSCR.HDE is set */
643 if (retval
== ERROR_OK
) {
645 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
646 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
649 if (retval
== ERROR_OK
) {
650 /* clear sticky bits in PRSR, SDR is now 0 */
651 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
652 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &tmp
);
658 static int aarch64_do_restart_one(struct target
*target
, enum restart_mode mode
)
660 struct armv8_common
*armv8
= target_to_armv8(target
);
663 LOG_DEBUG("%s", target_name(target
));
665 /* trigger an event on channel 1, generates a restart request to the PE */
666 retval
= arm_cti_pulse_channel(armv8
->cti
, 1);
667 if (retval
!= ERROR_OK
)
670 if (mode
== RESTART_SYNC
) {
671 int64_t then
= timeval_ms();
675 * if PRSR.SDR is set now, the target did restart, even
676 * if it's now already halted again (e.g. due to breakpoint)
678 retval
= aarch64_check_state_one(target
,
679 PRSR_SDR
, PRSR_SDR
, &resumed
, NULL
);
680 if (retval
!= ERROR_OK
|| resumed
)
683 if (timeval_ms() > then
+ 1000) {
684 LOG_ERROR("%s: Timeout waiting for resume"PRIx32
, target_name(target
));
685 retval
= ERROR_TARGET_TIMEOUT
;
691 if (retval
!= ERROR_OK
)
694 target
->debug_reason
= DBG_REASON_NOTHALTED
;
695 target
->state
= TARGET_RUNNING
;
700 static int aarch64_restart_one(struct target
*target
, enum restart_mode mode
)
704 LOG_DEBUG("%s", target_name(target
));
706 retval
= aarch64_prepare_restart_one(target
);
707 if (retval
== ERROR_OK
)
708 retval
= aarch64_do_restart_one(target
, mode
);
714 * prepare all but the current target for restart
716 static int aarch64_prep_restart_smp(struct target
*target
, int handle_breakpoints
, struct target
**p_first
)
718 int retval
= ERROR_OK
;
719 struct target_list
*head
;
720 struct target
*first
= NULL
;
723 foreach_smp_target(head
, target
->head
) {
724 struct target
*curr
= head
->target
;
726 /* skip calling target */
729 if (!target_was_examined(curr
))
731 if (curr
->state
!= TARGET_HALTED
)
734 /* resume at current address, not in step mode */
735 retval
= aarch64_restore_one(curr
, 1, &address
, handle_breakpoints
, 0);
736 if (retval
== ERROR_OK
)
737 retval
= aarch64_prepare_restart_one(curr
);
738 if (retval
!= ERROR_OK
) {
739 LOG_ERROR("failed to restore target %s", target_name(curr
));
742 /* remember the first valid target in the group */
754 static int aarch64_step_restart_smp(struct target
*target
)
756 int retval
= ERROR_OK
;
757 struct target_list
*head
;
758 struct target
*first
= NULL
;
760 LOG_DEBUG("%s", target_name(target
));
762 retval
= aarch64_prep_restart_smp(target
, 0, &first
);
763 if (retval
!= ERROR_OK
)
767 retval
= aarch64_do_restart_one(first
, RESTART_LAZY
);
768 if (retval
!= ERROR_OK
) {
769 LOG_DEBUG("error restarting target %s", target_name(first
));
773 int64_t then
= timeval_ms();
775 struct target
*curr
= target
;
776 bool all_resumed
= true;
778 foreach_smp_target(head
, target
->head
) {
787 if (!target_was_examined(curr
))
790 retval
= aarch64_check_state_one(curr
,
791 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
792 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
797 if (curr
->state
!= TARGET_RUNNING
) {
798 curr
->state
= TARGET_RUNNING
;
799 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
800 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
807 if (timeval_ms() > then
+ 1000) {
808 LOG_ERROR("%s: timeout waiting for target resume", __func__
);
809 retval
= ERROR_TARGET_TIMEOUT
;
813 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
814 * and it looks like the CTI's are not connected by a common
815 * trigger matrix. It seems that we need to halt one core in each
816 * cluster explicitly. So if we find that a core has not halted
817 * yet, we trigger an explicit resume for the second cluster.
819 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
820 if (retval
!= ERROR_OK
)
827 static int aarch64_resume(struct target
*target
, int current
,
828 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
831 uint64_t addr
= address
;
833 if (target
->state
!= TARGET_HALTED
)
834 return ERROR_TARGET_NOT_HALTED
;
837 * If this target is part of a SMP group, prepare the others
838 * targets for resuming. This involves restoring the complete
839 * target register context and setting up CTI gates to accept
840 * resume events from the trigger matrix.
843 retval
= aarch64_prep_restart_smp(target
, handle_breakpoints
, NULL
);
844 if (retval
!= ERROR_OK
)
848 /* all targets prepared, restore and restart the current target */
849 retval
= aarch64_restore_one(target
, current
, &addr
, handle_breakpoints
,
851 if (retval
== ERROR_OK
)
852 retval
= aarch64_restart_one(target
, RESTART_SYNC
);
853 if (retval
!= ERROR_OK
)
857 int64_t then
= timeval_ms();
859 struct target
*curr
= target
;
860 struct target_list
*head
;
861 bool all_resumed
= true;
863 foreach_smp_target(head
, target
->head
) {
870 if (!target_was_examined(curr
))
873 retval
= aarch64_check_state_one(curr
,
874 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
875 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
880 if (curr
->state
!= TARGET_RUNNING
) {
881 curr
->state
= TARGET_RUNNING
;
882 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
883 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
890 if (timeval_ms() > then
+ 1000) {
891 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__
, target_name(curr
));
892 retval
= ERROR_TARGET_TIMEOUT
;
897 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
898 * and it looks like the CTI's are not connected by a common
899 * trigger matrix. It seems that we need to halt one core in each
900 * cluster explicitly. So if we find that a core has not halted
901 * yet, we trigger an explicit resume for the second cluster.
903 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
904 if (retval
!= ERROR_OK
)
909 if (retval
!= ERROR_OK
)
912 target
->debug_reason
= DBG_REASON_NOTHALTED
;
914 if (!debug_execution
) {
915 target
->state
= TARGET_RUNNING
;
916 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
917 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
919 target
->state
= TARGET_DEBUG_RUNNING
;
920 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
921 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
927 static int aarch64_debug_entry(struct target
*target
)
929 int retval
= ERROR_OK
;
930 struct armv8_common
*armv8
= target_to_armv8(target
);
931 struct arm_dpm
*dpm
= &armv8
->dpm
;
932 enum arm_state core_state
;
935 /* make sure to clear all sticky errors */
936 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
937 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
938 if (retval
== ERROR_OK
)
939 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
940 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
941 if (retval
== ERROR_OK
)
942 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
944 if (retval
!= ERROR_OK
)
947 LOG_DEBUG("%s dscr = 0x%08" PRIx32
, target_name(target
), dscr
);
950 core_state
= armv8_dpm_get_core_state(dpm
);
951 armv8_select_opcodes(armv8
, core_state
== ARM_STATE_AARCH64
);
952 armv8_select_reg_access(armv8
, core_state
== ARM_STATE_AARCH64
);
954 /* close the CTI gate for all events */
955 if (retval
== ERROR_OK
)
956 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
957 /* discard async exceptions */
958 if (retval
== ERROR_OK
)
959 retval
= dpm
->instr_cpsr_sync(dpm
);
960 if (retval
!= ERROR_OK
)
963 /* Examine debug reason */
964 armv8_dpm_report_dscr(dpm
, dscr
);
966 /* save address of instruction that triggered the watchpoint? */
967 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
971 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
972 armv8
->debug_base
+ CPUV8_DBG_WFAR1
,
974 if (retval
!= ERROR_OK
)
978 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
979 armv8
->debug_base
+ CPUV8_DBG_WFAR0
,
981 if (retval
!= ERROR_OK
)
984 armv8_dpm_report_wfar(&armv8
->dpm
, wfar
);
987 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
989 if (retval
== ERROR_OK
&& armv8
->post_debug_entry
)
990 retval
= armv8
->post_debug_entry(target
);
995 static int aarch64_post_debug_entry(struct target
*target
)
997 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
998 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1000 enum arm_mode target_mode
= ARM_MODE_ANY
;
1003 switch (armv8
->arm
.core_mode
) {
1005 target_mode
= ARMV8_64_EL1H
;
1009 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL1
, 0);
1013 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL2
, 0);
1017 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL3
, 0);
1024 instr
= ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1028 LOG_INFO("cannot read system control register in this mode");
1032 if (target_mode
!= ARM_MODE_ANY
)
1033 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
1035 retval
= armv8
->dpm
.instr_read_data_r0(&armv8
->dpm
, instr
, &aarch64
->system_control_reg
);
1036 if (retval
!= ERROR_OK
)
1039 if (target_mode
!= ARM_MODE_ANY
)
1040 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
1042 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
1043 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
1045 if (armv8
->armv8_mmu
.armv8_cache
.info
== -1) {
1046 armv8_identify_cache(armv8
);
1047 armv8_read_mpidr(armv8
);
1050 armv8
->armv8_mmu
.mmu_enabled
=
1051 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
1052 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
1053 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
1054 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
1055 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
1060 * single-step a target
1062 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
1063 int handle_breakpoints
)
1065 struct armv8_common
*armv8
= target_to_armv8(target
);
1066 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1067 int saved_retval
= ERROR_OK
;
1071 if (target
->state
!= TARGET_HALTED
) {
1072 LOG_WARNING("target not halted");
1073 return ERROR_TARGET_NOT_HALTED
;
1076 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1077 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
1078 /* make sure EDECR.SS is not set when restoring the register */
1080 if (retval
== ERROR_OK
) {
1082 /* set EDECR.SS to enter hardware step mode */
1083 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1084 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (edecr
|0x4));
1086 /* disable interrupts while stepping */
1087 if (retval
== ERROR_OK
&& aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
)
1088 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0x3 << 22);
1089 /* bail out if stepping setup has failed */
1090 if (retval
!= ERROR_OK
)
1093 if (target
->smp
&& (current
== 1)) {
1095 * isolate current target so that it doesn't get resumed
1096 * together with the others
1098 retval
= arm_cti_gate_channel(armv8
->cti
, 1);
1099 /* resume all other targets in the group */
1100 if (retval
== ERROR_OK
)
1101 retval
= aarch64_step_restart_smp(target
);
1102 if (retval
!= ERROR_OK
) {
1103 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1106 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1109 /* all other targets running, restore and restart the current target */
1110 retval
= aarch64_restore_one(target
, current
, &address
, 0, 0);
1111 if (retval
== ERROR_OK
)
1112 retval
= aarch64_restart_one(target
, RESTART_LAZY
);
1114 if (retval
!= ERROR_OK
)
1117 LOG_DEBUG("target step-resumed at 0x%" PRIx64
, address
);
1118 if (!handle_breakpoints
)
1119 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1121 int64_t then
= timeval_ms();
1126 retval
= aarch64_check_state_one(target
,
1127 PRSR_SDR
|PRSR_HALT
, PRSR_SDR
|PRSR_HALT
, &stepped
, &prsr
);
1128 if (retval
!= ERROR_OK
|| stepped
)
1131 if (timeval_ms() > then
+ 100) {
1132 LOG_ERROR("timeout waiting for target %s halt after step",
1133 target_name(target
));
1134 retval
= ERROR_TARGET_TIMEOUT
;
1140 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1141 * causes a timeout. The core takes the step but doesn't complete it and so
1142 * debug state is never entered. However, you can manually halt the core
1143 * as an external debug even is also a WFI wakeup event.
1145 if (retval
== ERROR_TARGET_TIMEOUT
)
1146 saved_retval
= aarch64_halt_one(target
, HALT_SYNC
);
1149 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1150 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
1151 if (retval
!= ERROR_OK
)
1154 /* restore interrupts */
1155 if (aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
) {
1156 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0);
1157 if (retval
!= ERROR_OK
)
1161 if (saved_retval
!= ERROR_OK
)
1162 return saved_retval
;
1164 return aarch64_poll(target
);
1167 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
1169 struct armv8_common
*armv8
= target_to_armv8(target
);
1170 struct arm
*arm
= &armv8
->arm
;
1174 LOG_DEBUG("%s", target_name(target
));
1176 if (armv8
->pre_restore_context
)
1177 armv8
->pre_restore_context(target
);
1179 retval
= armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
1180 if (retval
== ERROR_OK
) {
1181 /* registers are now invalid */
1182 register_cache_invalidate(arm
->core_cache
);
1183 register_cache_invalidate(arm
->core_cache
->next
);
1190 * Cortex-A8 Breakpoint and watchpoint functions
1193 /* Setup hardware Breakpoint Register Pair */
1194 static int aarch64_set_breakpoint(struct target
*target
,
1195 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1200 uint8_t byte_addr_select
= 0x0F;
1201 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1202 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1203 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1205 if (breakpoint
->set
) {
1206 LOG_WARNING("breakpoint already set");
1210 if (breakpoint
->type
== BKPT_HARD
) {
1212 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
1214 if (brp_i
>= aarch64
->brp_num
) {
1215 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1216 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1218 breakpoint
->set
= brp_i
+ 1;
1219 if (breakpoint
->length
== 2)
1220 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1221 control
= ((matchmode
& 0x7) << 20)
1223 | (byte_addr_select
<< 5)
1225 brp_list
[brp_i
].used
= 1;
1226 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1227 brp_list
[brp_i
].control
= control
;
1228 bpt_value
= brp_list
[brp_i
].value
;
1230 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1231 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1232 (uint32_t)(bpt_value
& 0xFFFFFFFF));
1233 if (retval
!= ERROR_OK
)
1235 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1236 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1237 (uint32_t)(bpt_value
>> 32));
1238 if (retval
!= ERROR_OK
)
1241 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1242 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1243 brp_list
[brp_i
].control
);
1244 if (retval
!= ERROR_OK
)
1246 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1247 brp_list
[brp_i
].control
,
1248 brp_list
[brp_i
].value
);
1250 } else if (breakpoint
->type
== BKPT_SOFT
) {
1253 buf_set_u32(code
, 0, 32, armv8_opcode(armv8
, ARMV8_OPC_HLT
));
1254 retval
= target_read_memory(target
,
1255 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1256 breakpoint
->length
, 1,
1257 breakpoint
->orig_instr
);
1258 if (retval
!= ERROR_OK
)
1261 armv8_cache_d_inner_flush_virt(armv8
,
1262 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1263 breakpoint
->length
);
1265 retval
= target_write_memory(target
,
1266 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1267 breakpoint
->length
, 1, code
);
1268 if (retval
!= ERROR_OK
)
1271 armv8_cache_d_inner_flush_virt(armv8
,
1272 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1273 breakpoint
->length
);
1275 armv8_cache_i_inner_inval_virt(armv8
,
1276 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1277 breakpoint
->length
);
1279 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1282 /* Ensure that halting debug mode is enable */
1283 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
1284 if (retval
!= ERROR_OK
) {
1285 LOG_DEBUG("Failed to set DSCR.HDE");
1292 static int aarch64_set_context_breakpoint(struct target
*target
,
1293 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1295 int retval
= ERROR_FAIL
;
1298 uint8_t byte_addr_select
= 0x0F;
1299 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1300 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1301 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1303 if (breakpoint
->set
) {
1304 LOG_WARNING("breakpoint already set");
1307 /*check available context BRPs*/
1308 while ((brp_list
[brp_i
].used
||
1309 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
1312 if (brp_i
>= aarch64
->brp_num
) {
1313 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1317 breakpoint
->set
= brp_i
+ 1;
1318 control
= ((matchmode
& 0x7) << 20)
1320 | (byte_addr_select
<< 5)
1322 brp_list
[brp_i
].used
= 1;
1323 brp_list
[brp_i
].value
= (breakpoint
->asid
);
1324 brp_list
[brp_i
].control
= control
;
1325 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1326 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1327 brp_list
[brp_i
].value
);
1328 if (retval
!= ERROR_OK
)
1330 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1331 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1332 brp_list
[brp_i
].control
);
1333 if (retval
!= ERROR_OK
)
1335 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1336 brp_list
[brp_i
].control
,
1337 brp_list
[brp_i
].value
);
1342 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1344 int retval
= ERROR_FAIL
;
1345 int brp_1
= 0; /* holds the contextID pair */
1346 int brp_2
= 0; /* holds the IVA pair */
1347 uint32_t control_CTX
, control_IVA
;
1348 uint8_t CTX_byte_addr_select
= 0x0F;
1349 uint8_t IVA_byte_addr_select
= 0x0F;
1350 uint8_t CTX_machmode
= 0x03;
1351 uint8_t IVA_machmode
= 0x01;
1352 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1353 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1354 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1356 if (breakpoint
->set
) {
1357 LOG_WARNING("breakpoint already set");
1360 /*check available context BRPs*/
1361 while ((brp_list
[brp_1
].used
||
1362 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
1365 printf("brp(CTX) found num: %d\n", brp_1
);
1366 if (brp_1
>= aarch64
->brp_num
) {
1367 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1371 while ((brp_list
[brp_2
].used
||
1372 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
1375 printf("brp(IVA) found num: %d\n", brp_2
);
1376 if (brp_2
>= aarch64
->brp_num
) {
1377 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1381 breakpoint
->set
= brp_1
+ 1;
1382 breakpoint
->linked_BRP
= brp_2
;
1383 control_CTX
= ((CTX_machmode
& 0x7) << 20)
1386 | (CTX_byte_addr_select
<< 5)
1388 brp_list
[brp_1
].used
= 1;
1389 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1390 brp_list
[brp_1
].control
= control_CTX
;
1391 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1392 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1393 brp_list
[brp_1
].value
);
1394 if (retval
!= ERROR_OK
)
1396 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1397 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1398 brp_list
[brp_1
].control
);
1399 if (retval
!= ERROR_OK
)
1402 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1405 | (IVA_byte_addr_select
<< 5)
1407 brp_list
[brp_2
].used
= 1;
1408 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1409 brp_list
[brp_2
].control
= control_IVA
;
1410 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1411 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1412 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1413 if (retval
!= ERROR_OK
)
1415 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1416 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].BRPn
,
1417 brp_list
[brp_2
].value
>> 32);
1418 if (retval
!= ERROR_OK
)
1420 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1421 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1422 brp_list
[brp_2
].control
);
1423 if (retval
!= ERROR_OK
)
1429 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1432 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1433 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1434 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1436 if (!breakpoint
->set
) {
1437 LOG_WARNING("breakpoint not set");
1441 if (breakpoint
->type
== BKPT_HARD
) {
1442 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1443 int brp_i
= breakpoint
->set
- 1;
1444 int brp_j
= breakpoint
->linked_BRP
;
1445 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1446 LOG_DEBUG("Invalid BRP number in breakpoint");
1449 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1450 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1451 brp_list
[brp_i
].used
= 0;
1452 brp_list
[brp_i
].value
= 0;
1453 brp_list
[brp_i
].control
= 0;
1454 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1455 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1456 brp_list
[brp_i
].control
);
1457 if (retval
!= ERROR_OK
)
1459 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1460 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1461 (uint32_t)brp_list
[brp_i
].value
);
1462 if (retval
!= ERROR_OK
)
1464 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1465 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1466 (uint32_t)brp_list
[brp_i
].value
);
1467 if (retval
!= ERROR_OK
)
1469 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1470 LOG_DEBUG("Invalid BRP number in breakpoint");
1473 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1474 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1475 brp_list
[brp_j
].used
= 0;
1476 brp_list
[brp_j
].value
= 0;
1477 brp_list
[brp_j
].control
= 0;
1478 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1479 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1480 brp_list
[brp_j
].control
);
1481 if (retval
!= ERROR_OK
)
1483 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1484 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1485 (uint32_t)brp_list
[brp_j
].value
);
1486 if (retval
!= ERROR_OK
)
1488 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1489 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_j
].BRPn
,
1490 (uint32_t)brp_list
[brp_j
].value
);
1491 if (retval
!= ERROR_OK
)
1494 breakpoint
->linked_BRP
= 0;
1495 breakpoint
->set
= 0;
1499 int brp_i
= breakpoint
->set
- 1;
1500 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1501 LOG_DEBUG("Invalid BRP number in breakpoint");
1504 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1505 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1506 brp_list
[brp_i
].used
= 0;
1507 brp_list
[brp_i
].value
= 0;
1508 brp_list
[brp_i
].control
= 0;
1509 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1510 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1511 brp_list
[brp_i
].control
);
1512 if (retval
!= ERROR_OK
)
1514 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1515 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1516 brp_list
[brp_i
].value
);
1517 if (retval
!= ERROR_OK
)
1520 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1521 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1522 (uint32_t)brp_list
[brp_i
].value
);
1523 if (retval
!= ERROR_OK
)
1525 breakpoint
->set
= 0;
1529 /* restore original instruction (kept in target endianness) */
1531 armv8_cache_d_inner_flush_virt(armv8
,
1532 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1533 breakpoint
->length
);
1535 if (breakpoint
->length
== 4) {
1536 retval
= target_write_memory(target
,
1537 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1538 4, 1, breakpoint
->orig_instr
);
1539 if (retval
!= ERROR_OK
)
1542 retval
= target_write_memory(target
,
1543 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1544 2, 1, breakpoint
->orig_instr
);
1545 if (retval
!= ERROR_OK
)
1549 armv8_cache_d_inner_flush_virt(armv8
,
1550 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1551 breakpoint
->length
);
1553 armv8_cache_i_inner_inval_virt(armv8
,
1554 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1555 breakpoint
->length
);
1557 breakpoint
->set
= 0;
1562 static int aarch64_add_breakpoint(struct target
*target
,
1563 struct breakpoint
*breakpoint
)
1565 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1567 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1568 LOG_INFO("no hardware breakpoint available");
1569 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1572 if (breakpoint
->type
== BKPT_HARD
)
1573 aarch64
->brp_num_available
--;
1575 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1578 static int aarch64_add_context_breakpoint(struct target
*target
,
1579 struct breakpoint
*breakpoint
)
1581 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1583 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1584 LOG_INFO("no hardware breakpoint available");
1585 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1588 if (breakpoint
->type
== BKPT_HARD
)
1589 aarch64
->brp_num_available
--;
1591 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1594 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1595 struct breakpoint
*breakpoint
)
1597 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1599 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1600 LOG_INFO("no hardware breakpoint available");
1601 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1604 if (breakpoint
->type
== BKPT_HARD
)
1605 aarch64
->brp_num_available
--;
1607 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1611 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1613 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1616 /* It is perfectly possible to remove breakpoints while the target is running */
1617 if (target
->state
!= TARGET_HALTED
) {
1618 LOG_WARNING("target not halted");
1619 return ERROR_TARGET_NOT_HALTED
;
1623 if (breakpoint
->set
) {
1624 aarch64_unset_breakpoint(target
, breakpoint
);
1625 if (breakpoint
->type
== BKPT_HARD
)
1626 aarch64
->brp_num_available
++;
1633 * Cortex-A8 Reset functions
1636 static int aarch64_assert_reset(struct target
*target
)
1638 struct armv8_common
*armv8
= target_to_armv8(target
);
1642 /* FIXME when halt is requested, make it work somehow... */
1644 /* Issue some kind of warm reset. */
1645 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1646 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1647 else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1648 /* REVISIT handle "pulls" cases, if there's
1649 * hardware that needs them to work.
1651 jtag_add_reset(0, 1);
1653 LOG_ERROR("%s: how to reset?", target_name(target
));
1657 /* registers are now invalid */
1658 if (target_was_examined(target
)) {
1659 register_cache_invalidate(armv8
->arm
.core_cache
);
1660 register_cache_invalidate(armv8
->arm
.core_cache
->next
);
1663 target
->state
= TARGET_RESET
;
1668 static int aarch64_deassert_reset(struct target
*target
)
1674 /* be certain SRST is off */
1675 jtag_add_reset(0, 0);
1677 if (!target_was_examined(target
))
1680 retval
= aarch64_poll(target
);
1681 if (retval
!= ERROR_OK
)
1684 if (target
->reset_halt
) {
1685 if (target
->state
!= TARGET_HALTED
) {
1686 LOG_WARNING("%s: ran after reset and before halt ...",
1687 target_name(target
));
1688 retval
= target_halt(target
);
1689 if (retval
!= ERROR_OK
)
1694 return aarch64_init_debug_access(target
);
1697 static int aarch64_write_cpu_memory_slow(struct target
*target
,
1698 uint32_t size
, uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
1700 struct armv8_common
*armv8
= target_to_armv8(target
);
1701 struct arm_dpm
*dpm
= &armv8
->dpm
;
1702 struct arm
*arm
= &armv8
->arm
;
1705 armv8_reg_current(arm
, 1)->dirty
= true;
1707 /* change DCC to normal mode if necessary */
1708 if (*dscr
& DSCR_MA
) {
1710 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1711 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1712 if (retval
!= ERROR_OK
)
1717 uint32_t data
, opcode
;
1719 /* write the data to store into DTRRX */
1723 data
= target_buffer_get_u16(target
, buffer
);
1725 data
= target_buffer_get_u32(target
, buffer
);
1726 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1727 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
1728 if (retval
!= ERROR_OK
)
1731 if (arm
->core_state
== ARM_STATE_AARCH64
)
1732 retval
= dpm
->instr_execute(dpm
, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0
, 1));
1734 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
1735 if (retval
!= ERROR_OK
)
1739 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRB_IP
);
1741 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRH_IP
);
1743 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRW_IP
);
1744 retval
= dpm
->instr_execute(dpm
, opcode
);
1745 if (retval
!= ERROR_OK
)
1756 static int aarch64_write_cpu_memory_fast(struct target
*target
,
1757 uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
1759 struct armv8_common
*armv8
= target_to_armv8(target
);
1760 struct arm
*arm
= &armv8
->arm
;
1763 armv8_reg_current(arm
, 1)->dirty
= true;
1765 /* Step 1.d - Change DCC to memory mode */
1767 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1768 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1769 if (retval
!= ERROR_OK
)
1773 /* Step 2.a - Do the write */
1774 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
1775 buffer
, 4, count
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
1776 if (retval
!= ERROR_OK
)
1779 /* Step 3.a - Switch DTR mode back to Normal mode */
1781 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1782 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1783 if (retval
!= ERROR_OK
)
1789 static int aarch64_write_cpu_memory(struct target
*target
,
1790 uint64_t address
, uint32_t size
,
1791 uint32_t count
, const uint8_t *buffer
)
1793 /* write memory through APB-AP */
1794 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1795 struct armv8_common
*armv8
= target_to_armv8(target
);
1796 struct arm_dpm
*dpm
= &armv8
->dpm
;
1797 struct arm
*arm
= &armv8
->arm
;
1800 if (target
->state
!= TARGET_HALTED
) {
1801 LOG_WARNING("target not halted");
1802 return ERROR_TARGET_NOT_HALTED
;
1805 /* Mark register X0 as dirty, as it will be used
1806 * for transferring the data.
1807 * It will be restored automatically when exiting
1810 armv8_reg_current(arm
, 0)->dirty
= true;
1812 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1815 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1816 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1817 if (retval
!= ERROR_OK
)
1820 /* Set Normal access mode */
1821 dscr
= (dscr
& ~DSCR_MA
);
1822 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1823 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1824 if (retval
!= ERROR_OK
)
1827 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1828 /* Write X0 with value 'address' using write procedure */
1829 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1830 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1831 retval
= dpm
->instr_write_data_dcc_64(dpm
,
1832 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
1834 /* Write R0 with value 'address' using write procedure */
1835 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1836 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1837 retval
= dpm
->instr_write_data_dcc(dpm
,
1838 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
1841 if (retval
!= ERROR_OK
)
1844 if (size
== 4 && (address
% 4) == 0)
1845 retval
= aarch64_write_cpu_memory_fast(target
, count
, buffer
, &dscr
);
1847 retval
= aarch64_write_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
1849 if (retval
!= ERROR_OK
) {
1850 /* Unset DTR mode */
1851 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1852 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1854 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1855 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1858 /* Check for sticky abort flags in the DSCR */
1859 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1860 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1861 if (retval
!= ERROR_OK
)
1865 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1866 /* Abort occurred - clear it and exit */
1867 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1868 armv8_dpm_handle_exception(dpm
, true);
1876 static int aarch64_read_cpu_memory_slow(struct target
*target
,
1877 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
1879 struct armv8_common
*armv8
= target_to_armv8(target
);
1880 struct arm_dpm
*dpm
= &armv8
->dpm
;
1881 struct arm
*arm
= &armv8
->arm
;
1884 armv8_reg_current(arm
, 1)->dirty
= true;
1886 /* change DCC to normal mode (if necessary) */
1887 if (*dscr
& DSCR_MA
) {
1889 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1890 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1891 if (retval
!= ERROR_OK
)
1896 uint32_t opcode
, data
;
1899 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRB_IP
);
1901 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRH_IP
);
1903 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRW_IP
);
1904 retval
= dpm
->instr_execute(dpm
, opcode
);
1905 if (retval
!= ERROR_OK
)
1908 if (arm
->core_state
== ARM_STATE_AARCH64
)
1909 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0
, 1));
1911 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1912 if (retval
!= ERROR_OK
)
1915 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1916 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &data
);
1917 if (retval
!= ERROR_OK
)
1921 *buffer
= (uint8_t)data
;
1923 target_buffer_set_u16(target
, buffer
, (uint16_t)data
);
1925 target_buffer_set_u32(target
, buffer
, data
);
1935 static int aarch64_read_cpu_memory_fast(struct target
*target
,
1936 uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
1938 struct armv8_common
*armv8
= target_to_armv8(target
);
1939 struct arm_dpm
*dpm
= &armv8
->dpm
;
1940 struct arm
*arm
= &armv8
->arm
;
1944 /* Mark X1 as dirty */
1945 armv8_reg_current(arm
, 1)->dirty
= true;
1947 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1948 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1949 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0));
1951 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1952 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1955 if (retval
!= ERROR_OK
)
1958 /* Step 1.e - Change DCC to memory mode */
1960 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1961 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1962 if (retval
!= ERROR_OK
)
1965 /* Step 1.f - read DBGDTRTX and discard the value */
1966 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1967 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1968 if (retval
!= ERROR_OK
)
1972 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1973 * Abort flags are sticky, so can be read at end of transactions
1975 * This data is read in aligned to 32 bit boundary.
1979 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1980 * increments X0 by 4. */
1981 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, buffer
, 4, count
,
1982 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
1983 if (retval
!= ERROR_OK
)
1987 /* Step 3.a - set DTR access mode back to Normal mode */
1989 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1990 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1991 if (retval
!= ERROR_OK
)
1994 /* Step 3.b - read DBGDTRTX for the final value */
1995 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1996 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1997 if (retval
!= ERROR_OK
)
2000 target_buffer_set_u32(target
, buffer
+ count
* 4, value
);
2004 static int aarch64_read_cpu_memory(struct target
*target
,
2005 target_addr_t address
, uint32_t size
,
2006 uint32_t count
, uint8_t *buffer
)
2008 /* read memory through APB-AP */
2009 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2010 struct armv8_common
*armv8
= target_to_armv8(target
);
2011 struct arm_dpm
*dpm
= &armv8
->dpm
;
2012 struct arm
*arm
= &armv8
->arm
;
2015 LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64
" size %" PRIu32
" count %" PRIu32
,
2016 address
, size
, count
);
2018 if (target
->state
!= TARGET_HALTED
) {
2019 LOG_WARNING("target not halted");
2020 return ERROR_TARGET_NOT_HALTED
;
2023 /* Mark register X0 as dirty, as it will be used
2024 * for transferring the data.
2025 * It will be restored automatically when exiting
2028 armv8_reg_current(arm
, 0)->dirty
= true;
2031 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2032 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2033 if (retval
!= ERROR_OK
)
2036 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2038 /* Set Normal access mode */
2040 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2041 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2042 if (retval
!= ERROR_OK
)
2045 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2046 /* Write X0 with value 'address' using write procedure */
2047 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2048 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2049 retval
= dpm
->instr_write_data_dcc_64(dpm
,
2050 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
2052 /* Write R0 with value 'address' using write procedure */
2053 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2054 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2055 retval
= dpm
->instr_write_data_dcc(dpm
,
2056 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
2059 if (retval
!= ERROR_OK
)
2062 if (size
== 4 && (address
% 4) == 0)
2063 retval
= aarch64_read_cpu_memory_fast(target
, count
, buffer
, &dscr
);
2065 retval
= aarch64_read_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
2067 if (dscr
& DSCR_MA
) {
2069 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2070 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2073 if (retval
!= ERROR_OK
)
2076 /* Check for sticky abort flags in the DSCR */
2077 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2078 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2079 if (retval
!= ERROR_OK
)
2084 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
2085 /* Abort occurred - clear it and exit */
2086 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
2087 armv8_dpm_handle_exception(dpm
, true);
2095 static int aarch64_read_phys_memory(struct target
*target
,
2096 target_addr_t address
, uint32_t size
,
2097 uint32_t count
, uint8_t *buffer
)
2099 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2101 if (count
&& buffer
) {
2102 /* read memory through APB-AP */
2103 retval
= aarch64_mmu_modify(target
, 0);
2104 if (retval
!= ERROR_OK
)
2106 retval
= aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2111 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
2112 uint32_t size
, uint32_t count
, uint8_t *buffer
)
2114 int mmu_enabled
= 0;
2117 /* determine if MMU was enabled on target stop */
2118 retval
= aarch64_mmu(target
, &mmu_enabled
);
2119 if (retval
!= ERROR_OK
)
2123 /* enable MMU as we could have disabled it for phys access */
2124 retval
= aarch64_mmu_modify(target
, 1);
2125 if (retval
!= ERROR_OK
)
2128 return aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2131 static int aarch64_write_phys_memory(struct target
*target
,
2132 target_addr_t address
, uint32_t size
,
2133 uint32_t count
, const uint8_t *buffer
)
2135 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2137 if (count
&& buffer
) {
2138 /* write memory through APB-AP */
2139 retval
= aarch64_mmu_modify(target
, 0);
2140 if (retval
!= ERROR_OK
)
2142 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2148 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
2149 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
2151 int mmu_enabled
= 0;
2154 /* determine if MMU was enabled on target stop */
2155 retval
= aarch64_mmu(target
, &mmu_enabled
);
2156 if (retval
!= ERROR_OK
)
2160 /* enable MMU as we could have disabled it for phys access */
2161 retval
= aarch64_mmu_modify(target
, 1);
2162 if (retval
!= ERROR_OK
)
2165 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2168 static int aarch64_handle_target_request(void *priv
)
2170 struct target
*target
= priv
;
2171 struct armv8_common
*armv8
= target_to_armv8(target
);
2174 if (!target_was_examined(target
))
2176 if (!target
->dbg_msg_enabled
)
2179 if (target
->state
== TARGET_RUNNING
) {
2182 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2183 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2185 /* check if we have data */
2186 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
2187 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2188 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
2189 if (retval
== ERROR_OK
) {
2190 target_request(target
, request
);
2191 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2192 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2200 static int aarch64_examine_first(struct target
*target
)
2202 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2203 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2204 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2205 struct aarch64_private_config
*pc
;
2207 int retval
= ERROR_OK
;
2208 uint64_t debug
, ttypr
;
2210 uint32_t tmp0
, tmp1
, tmp2
, tmp3
;
2211 debug
= ttypr
= cpuid
= 0;
2213 retval
= dap_dp_init(swjdp
);
2214 if (retval
!= ERROR_OK
)
2217 /* Search for the APB-AB - it is needed for access to debug registers */
2218 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
2219 if (retval
!= ERROR_OK
) {
2220 LOG_ERROR("Could not find APB-AP for debug access");
2224 retval
= mem_ap_init(armv8
->debug_ap
);
2225 if (retval
!= ERROR_OK
) {
2226 LOG_ERROR("Could not initialize the APB-AP");
2230 armv8
->debug_ap
->memaccess_tck
= 10;
2232 if (!target
->dbgbase_set
) {
2234 /* Get ROM Table base */
2236 int32_t coreidx
= target
->coreid
;
2237 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
2238 if (retval
!= ERROR_OK
)
2240 /* Lookup 0x15 -- Processor DAP */
2241 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
2242 &armv8
->debug_base
, &coreidx
);
2243 if (retval
!= ERROR_OK
)
2245 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
2246 " apid: %08" PRIx32
, coreidx
, armv8
->debug_base
, apid
);
2248 armv8
->debug_base
= target
->dbgbase
;
2250 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2251 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
2252 if (retval
!= ERROR_OK
) {
2253 LOG_DEBUG("Examine %s failed", "oslock");
2257 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2258 armv8
->debug_base
+ CPUV8_DBG_MAINID0
, &cpuid
);
2259 if (retval
!= ERROR_OK
) {
2260 LOG_DEBUG("Examine %s failed", "CPUID");
2264 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2265 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
, &tmp0
);
2266 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2267 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
+ 4, &tmp1
);
2268 if (retval
!= ERROR_OK
) {
2269 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2272 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2273 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
, &tmp2
);
2274 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2275 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
+ 4, &tmp3
);
2276 if (retval
!= ERROR_OK
) {
2277 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2281 retval
= dap_run(armv8
->debug_ap
->dap
);
2282 if (retval
!= ERROR_OK
) {
2283 LOG_ERROR("%s: examination failed\n", target_name(target
));
2288 ttypr
= (ttypr
<< 32) | tmp0
;
2290 debug
= (debug
<< 32) | tmp2
;
2292 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
2293 LOG_DEBUG("ttypr = 0x%08" PRIx64
, ttypr
);
2294 LOG_DEBUG("debug = 0x%08" PRIx64
, debug
);
2296 if (target
->private_config
== NULL
)
2299 pc
= (struct aarch64_private_config
*)target
->private_config
;
2300 if (pc
->cti
== NULL
)
2303 armv8
->cti
= pc
->cti
;
2305 retval
= aarch64_dpm_setup(aarch64
, debug
);
2306 if (retval
!= ERROR_OK
)
2309 /* Setup Breakpoint Register Pairs */
2310 aarch64
->brp_num
= (uint32_t)((debug
>> 12) & 0x0F) + 1;
2311 aarch64
->brp_num_context
= (uint32_t)((debug
>> 28) & 0x0F) + 1;
2312 aarch64
->brp_num_available
= aarch64
->brp_num
;
2313 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
2314 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
2315 aarch64
->brp_list
[i
].used
= 0;
2316 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
2317 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
2319 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
2320 aarch64
->brp_list
[i
].value
= 0;
2321 aarch64
->brp_list
[i
].control
= 0;
2322 aarch64
->brp_list
[i
].BRPn
= i
;
2325 LOG_DEBUG("Configured %i hw breakpoints", aarch64
->brp_num
);
2327 target
->state
= TARGET_UNKNOWN
;
2328 target
->debug_reason
= DBG_REASON_NOTHALTED
;
2329 aarch64
->isrmasking_mode
= AARCH64_ISRMASK_ON
;
2330 target_set_examined(target
);
2334 static int aarch64_examine(struct target
*target
)
2336 int retval
= ERROR_OK
;
2338 /* don't re-probe hardware after each reset */
2339 if (!target_was_examined(target
))
2340 retval
= aarch64_examine_first(target
);
2342 /* Configure core debug access */
2343 if (retval
== ERROR_OK
)
2344 retval
= aarch64_init_debug_access(target
);
2350 * Cortex-A8 target creation and initialization
2353 static int aarch64_init_target(struct command_context
*cmd_ctx
,
2354 struct target
*target
)
2356 /* examine_first() does a bunch of this */
2360 static int aarch64_init_arch_info(struct target
*target
,
2361 struct aarch64_common
*aarch64
, struct jtag_tap
*tap
)
2363 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2365 /* Setup struct aarch64_common */
2366 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
2367 /* tap has no dap initialized */
2369 tap
->dap
= dap_init();
2370 tap
->dap
->tap
= tap
;
2372 armv8
->arm
.dap
= tap
->dap
;
2374 /* register arch-specific functions */
2375 armv8
->examine_debug_reason
= NULL
;
2376 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
2377 armv8
->pre_restore_context
= NULL
;
2378 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
2380 armv8_init_arch_info(target
, armv8
);
2381 target_register_timer_callback(aarch64_handle_target_request
, 1, 1, target
);
2386 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
2388 struct aarch64_common
*aarch64
= calloc(1, sizeof(struct aarch64_common
));
2390 return aarch64_init_arch_info(target
, aarch64
, target
->tap
);
2393 static int aarch64_mmu(struct target
*target
, int *enabled
)
2395 if (target
->state
!= TARGET_HALTED
) {
2396 LOG_ERROR("%s: target %s not halted", __func__
, target_name(target
));
2397 return ERROR_TARGET_INVALID
;
2400 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
2404 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
2405 target_addr_t
*phys
)
2407 return armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
2410 static int aarch64_jim_configure(struct target
*target
, Jim_GetOptInfo
*goi
)
2412 struct aarch64_private_config
*pc
;
2416 /* check if argv[0] is for us */
2417 arg
= Jim_GetString(goi
->argv
[0], NULL
);
2418 if (strcmp(arg
, "-cti"))
2419 return JIM_CONTINUE
;
2421 /* pop the argument from argv */
2422 e
= Jim_GetOpt_String(goi
, &arg
, NULL
);
2426 /* check if we have another option */
2427 if (goi
->argc
== 0) {
2428 Jim_WrongNumArgs(goi
->interp
, goi
->argc
, goi
->argv
, "-cti ?cti-name?");
2432 pc
= (struct aarch64_private_config
*)target
->private_config
;
2434 if (goi
->isconfigure
) {
2436 struct arm_cti
*cti
;
2437 e
= Jim_GetOpt_Obj(goi
, &o_cti
);
2440 cti
= cti_instance_by_jim_obj(goi
->interp
, o_cti
);
2445 pc
= calloc(1, sizeof(struct aarch64_private_config
));
2446 target
->private_config
= pc
;
2450 if (goi
->argc
!= 0) {
2451 Jim_WrongNumArgs(goi
->interp
,
2452 goi
->argc
, goi
->argv
,
2457 if (pc
== NULL
|| pc
->cti
== NULL
) {
2458 Jim_SetResultString(goi
->interp
, "CTI not configured", -1);
2461 Jim_SetResultString(goi
->interp
, arm_cti_name(pc
->cti
), -1);
2467 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
2469 struct target
*target
= get_current_target(CMD_CTX
);
2470 struct armv8_common
*armv8
= target_to_armv8(target
);
2472 return armv8_handle_cache_info_command(CMD_CTX
,
2473 &armv8
->armv8_mmu
.armv8_cache
);
2477 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
2479 struct target
*target
= get_current_target(CMD_CTX
);
2480 if (!target_was_examined(target
)) {
2481 LOG_ERROR("target not examined yet");
2485 return aarch64_init_debug_access(target
);
2487 COMMAND_HANDLER(aarch64_handle_smp_off_command
)
2489 struct target
*target
= get_current_target(CMD_CTX
);
2490 /* check target is an smp target */
2491 struct target_list
*head
;
2492 struct target
*curr
;
2493 head
= target
->head
;
2495 if (head
!= (struct target_list
*)NULL
) {
2496 while (head
!= (struct target_list
*)NULL
) {
2497 curr
= head
->target
;
2501 /* fixes the target display to the debugger */
2502 target
->gdb_service
->target
= target
;
2507 COMMAND_HANDLER(aarch64_handle_smp_on_command
)
2509 struct target
*target
= get_current_target(CMD_CTX
);
2510 struct target_list
*head
;
2511 struct target
*curr
;
2512 head
= target
->head
;
2513 if (head
!= (struct target_list
*)NULL
) {
2515 while (head
!= (struct target_list
*)NULL
) {
2516 curr
= head
->target
;
2524 COMMAND_HANDLER(aarch64_mask_interrupts_command
)
2526 struct target
*target
= get_current_target(CMD_CTX
);
2527 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2529 static const Jim_Nvp nvp_maskisr_modes
[] = {
2530 { .name
= "off", .value
= AARCH64_ISRMASK_OFF
},
2531 { .name
= "on", .value
= AARCH64_ISRMASK_ON
},
2532 { .name
= NULL
, .value
= -1 },
2537 n
= Jim_Nvp_name2value_simple(nvp_maskisr_modes
, CMD_ARGV
[0]);
2538 if (n
->name
== NULL
) {
2539 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV
[0]);
2540 return ERROR_COMMAND_SYNTAX_ERROR
;
2543 aarch64
->isrmasking_mode
= n
->value
;
2546 n
= Jim_Nvp_value2name_simple(nvp_maskisr_modes
, aarch64
->isrmasking_mode
);
2547 command_print(CMD_CTX
, "aarch64 interrupt mask %s", n
->name
);
2552 static const struct command_registration aarch64_exec_command_handlers
[] = {
2554 .name
= "cache_info",
2555 .handler
= aarch64_handle_cache_info_command
,
2556 .mode
= COMMAND_EXEC
,
2557 .help
= "display information about target caches",
2562 .handler
= aarch64_handle_dbginit_command
,
2563 .mode
= COMMAND_EXEC
,
2564 .help
= "Initialize core debug",
2567 { .name
= "smp_off",
2568 .handler
= aarch64_handle_smp_off_command
,
2569 .mode
= COMMAND_EXEC
,
2570 .help
= "Stop smp handling",
2575 .handler
= aarch64_handle_smp_on_command
,
2576 .mode
= COMMAND_EXEC
,
2577 .help
= "Restart smp handling",
2582 .handler
= aarch64_mask_interrupts_command
,
2583 .mode
= COMMAND_ANY
,
2584 .help
= "mask aarch64 interrupts during single-step",
2585 .usage
= "['on'|'off']",
2588 COMMAND_REGISTRATION_DONE
2590 static const struct command_registration aarch64_command_handlers
[] = {
2592 .chain
= armv8_command_handlers
,
2596 .mode
= COMMAND_ANY
,
2597 .help
= "Aarch64 command group",
2599 .chain
= aarch64_exec_command_handlers
,
2601 COMMAND_REGISTRATION_DONE
2604 struct target_type aarch64_target
= {
2607 .poll
= aarch64_poll
,
2608 .arch_state
= armv8_arch_state
,
2610 .halt
= aarch64_halt
,
2611 .resume
= aarch64_resume
,
2612 .step
= aarch64_step
,
2614 .assert_reset
= aarch64_assert_reset
,
2615 .deassert_reset
= aarch64_deassert_reset
,
2617 /* REVISIT allow exporting VFP3 registers ... */
2618 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
2620 .read_memory
= aarch64_read_memory
,
2621 .write_memory
= aarch64_write_memory
,
2623 .add_breakpoint
= aarch64_add_breakpoint
,
2624 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
2625 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
2626 .remove_breakpoint
= aarch64_remove_breakpoint
,
2627 .add_watchpoint
= NULL
,
2628 .remove_watchpoint
= NULL
,
2630 .commands
= aarch64_command_handlers
,
2631 .target_create
= aarch64_target_create
,
2632 .target_jim_configure
= aarch64_jim_configure
,
2633 .init_target
= aarch64_init_target
,
2634 .examine
= aarch64_examine
,
2636 .read_phys_memory
= aarch64_read_phys_memory
,
2637 .write_phys_memory
= aarch64_write_phys_memory
,
2639 .virt2phys
= aarch64_virt2phys
,
Linking to an existing account
If you already have an account and want to add another login method, you MUST first sign in with your existing account and then change the URL to
https://review.openocd.org/login/?link
to return to this page; this time it will work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)