1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
/* Per-target private configuration; wraps the ADIv5 DAP configuration.
 * NOTE(review): extraction dropped lines here — the closing "};" (and possibly
 * further members) of this struct are missing from this chunk. */
43 struct aarch64_private_config
{
44 struct adiv5_private_config adiv5_config
;
/* Forward declarations for functions defined later in this file. */
48 static int aarch64_poll(struct target
*target
);
49 static int aarch64_debug_entry(struct target
*target
);
50 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
51 static int aarch64_set_breakpoint(struct target
*target
,
52 struct breakpoint
*breakpoint
, uint8_t matchmode
);
53 static int aarch64_set_context_breakpoint(struct target
*target
,
54 struct breakpoint
*breakpoint
, uint8_t matchmode
);
55 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
56 struct breakpoint
*breakpoint
);
57 static int aarch64_unset_breakpoint(struct target
*target
,
58 struct breakpoint
*breakpoint
);
59 static int aarch64_mmu(struct target
*target
, int *enabled
);
60 static int aarch64_virt2phys(struct target
*target
,
61 target_addr_t virt
, target_addr_t
*phys
);
62 static int aarch64_read_cpu_memory(struct target
*target
,
63 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
/* Iterate over every entry of a target's SMP group list.
 * 'pos' is a struct target_list * cursor; 'head' is the list head. */
65 #define foreach_smp_target(pos, head) \
66 for (pos = head; (pos != NULL); pos = pos->next)
/* Write the cached system control register (SCTLR) back to the core if the
 * cached "current" copy differs from the desired value. Selects the MSR/MCR
 * opcode for the core's current exception level (EL1/EL2/EL3, or CP15 c1 for
 * AArch32), temporarily switching DPM mode if needed and restoring
 * ARM_MODE_ANY afterwards.
 * NOTE(review): extraction dropped lines (braces, 'case' labels, the 'instr'
 * declaration and 'return retval;') — restore from upstream before compiling. */
68 static int aarch64_restore_system_control_reg(struct target
*target
)
70 enum arm_mode target_mode
= ARM_MODE_ANY
;
71 int retval
= ERROR_OK
;
74 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
75 struct armv8_common
*armv8
= target_to_armv8(target
);
77 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
78 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
79 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
81 switch (armv8
->arm
.core_mode
) {
83 target_mode
= ARMV8_64_EL1H
;
87 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
91 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
95 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
102 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
106 LOG_INFO("cannot read system control register in this mode");
110 if (target_mode
!= ARM_MODE_ANY
)
111 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
113 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
, aarch64
->system_control_reg
);
114 if (retval
!= ERROR_OK
)
117 if (target_mode
!= ARM_MODE_ANY
)
118 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
124 /* modify system_control_reg in order to enable or disable mmu for :
125 * - virt2phys address conversion
126 * - read or write memory in phys or virt address */
/* Toggle the MMU bit (SCTLR.M, bit 0) in the cached "current" SCTLR and push
 * it to the core. When disabling, also clears the data-cache-enable bit
 * (SCTLR.C, bit 2) and flushes the data cache first via the armv8 cache hook.
 * Refuses to enable the MMU if it was disabled when the target stopped.
 * NOTE(review): extraction dropped lines ('enable' branch structure, 'case'
 * labels, 'instr' declaration, closing braces/return) — incomplete as shown. */
127 static int aarch64_mmu_modify(struct target
*target
, int enable
)
129 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
130 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
131 int retval
= ERROR_OK
;
135 /* if mmu enabled at target stop and mmu not enable */
136 if (!(aarch64
->system_control_reg
& 0x1U
)) {
137 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
140 if (!(aarch64
->system_control_reg_curr
& 0x1U
))
141 aarch64
->system_control_reg_curr
|= 0x1U
;
143 if (aarch64
->system_control_reg_curr
& 0x4U
) {
144 /* data cache is active */
145 aarch64
->system_control_reg_curr
&= ~0x4U
;
146 /* flush data cache armv8 function to be called */
147 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
148 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
150 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
151 aarch64
->system_control_reg_curr
&= ~0x1U
;
155 switch (armv8
->arm
.core_mode
) {
159 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
163 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
167 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
174 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
178 LOG_DEBUG("unknown cpu state 0x%" PRIx32
, armv8
->arm
.core_mode
);
182 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
,
183 aarch64
->system_control_reg_curr
);
/* Low-level debug-access initialization for one core: clears the OS lock
 * (CPUV8_DBG_OSLAR), reads PRSR to clear the sticky power-down status, and
 * programs the static CTI routing (channel 0 -> halt, channel 1 -> restart,
 * all channels gated from the CTM). Assumes processor state is saved. */
188 * Basic debug access, very low level assumes state is saved
190 static int aarch64_init_debug_access(struct target
*target
)
192 struct armv8_common
*armv8
= target_to_armv8(target
);
196 LOG_DEBUG("%s", target_name(target
));
198 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
199 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
200 if (retval
!= ERROR_OK
) {
201 LOG_DEBUG("Examine %s failed", "oslock");
205 /* Clear Sticky Power Down status Bit in PRSR to enable access to
206 the registers in the Core Power Domain */
207 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
208 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
209 if (retval
!= ERROR_OK
)
213 * Static CTI configuration:
214 * Channel 0 -> trigger outputs HALT request to PE
215 * Channel 1 -> trigger outputs Resume request to PE
216 * Gate all channel trigger events from entering the CTM
220 retval
= arm_cti_enable(armv8
->cti
, true);
221 /* By default, gate all channel events to and from the CTM */
222 if (retval
== ERROR_OK
)
223 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
224 /* output halt requests to PE on channel 0 event */
225 if (retval
== ERROR_OK
)
226 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN0
, CTI_CHNL(0));
227 /* output restart requests to PE on channel 1 event */
228 if (retval
== ERROR_OK
)
229 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN1
, CTI_CHNL(1));
230 if (retval
!= ERROR_OK
)
233 /* Resync breakpoint registers */
238 /* Write to memory mapped registers directly with no cache or mmu handling */
/* Thin wrapper over mem_ap_write_atomic_u32 on the debug AP.
 * NOTE(review): the parameter list ('address', 'value') and the return were
 * dropped by the extraction; both are used in the body below. */
239 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
244 struct armv8_common
*armv8
= target_to_armv8(target
);
246 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
/* Hook up and initialize the Debug Programmer's Model (DPM) for this core:
 * links the DPM to the ARM core struct, then runs the generic armv8 DPM
 * setup and initialization. 'debug' is the debug base — presumably stored
 * into the dpm on a dropped line; TODO confirm against upstream. */
251 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint64_t debug
)
253 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
256 dpm
->arm
= &a8
->armv8_common
.arm
;
259 retval
= armv8_dpm_setup(dpm
);
260 if (retval
== ERROR_OK
)
261 retval
= armv8_dpm_initialize(dpm
);
/* Read-modify-write the masked bits of DSCR (CPUV8_DBG_DSCR) to 'value'. */
266 static int aarch64_set_dscr_bits(struct target
*target
, unsigned long bit_mask
, unsigned long value
)
268 struct armv8_common
*armv8
= target_to_armv8(target
);
269 return armv8_set_dbgreg_bits(armv8
, CPUV8_DBG_DSCR
, bit_mask
, value
);
/* Read PRSR once and report (via *p_result) whether the masked bits match
 * 'val'. Optionally returns the raw PRSR through p_prsr (the store to
 * p_prsr was dropped by the extraction but is implied by the parameter). */
272 static int aarch64_check_state_one(struct target
*target
,
273 uint32_t mask
, uint32_t val
, int *p_result
, uint32_t *p_prsr
)
275 struct armv8_common
*armv8
= target_to_armv8(target
);
279 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
280 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &prsr
);
281 if (retval
!= ERROR_OK
)
288 *p_result
= (prsr
& mask
) == (val
& mask
);
/* Poll PRSR until PRSR_HALT is set or ~1 second elapses; returns
 * ERROR_TARGET_TIMEOUT on expiry (polling loop braces dropped by extraction). */
293 static int aarch64_wait_halt_one(struct target
*target
)
295 int retval
= ERROR_OK
;
298 int64_t then
= timeval_ms();
302 retval
= aarch64_check_state_one(target
, PRSR_HALT
, PRSR_HALT
, &halted
, &prsr
);
303 if (retval
!= ERROR_OK
|| halted
)
306 if (timeval_ms() > then
+ 1000) {
307 retval
= ERROR_TARGET_TIMEOUT
;
308 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32
, target_name(target
), prsr
);
/* Prepare every running, examined core of the SMP group for halting:
 * mark it DBG_REASON_DBGRQ, ungate CTI channel 0 (halt), and enable
 * halting debug mode (DSCR.HDE). With exc_target the calling target is
 * skipped. *p_first receives the first prepared core (assignment is on a
 * line dropped by the extraction; 'first' is declared and tested below). */
315 static int aarch64_prepare_halt_smp(struct target
*target
, bool exc_target
, struct target
**p_first
)
317 int retval
= ERROR_OK
;
318 struct target_list
*head
= target
->head
;
319 struct target
*first
= NULL
;
321 LOG_DEBUG("target %s exc %i", target_name(target
), exc_target
);
323 while (head
!= NULL
) {
324 struct target
*curr
= head
->target
;
325 struct armv8_common
*armv8
= target_to_armv8(curr
);
328 if (exc_target
&& curr
== target
)
330 if (!target_was_examined(curr
))
332 if (curr
->state
!= TARGET_RUNNING
)
335 /* HACK: mark this target as prepared for halting */
336 curr
->debug_reason
= DBG_REASON_DBGRQ
;
338 /* open the gate for channel 0 to let HALT requests pass to the CTM */
339 retval
= arm_cti_ungate_channel(armv8
->cti
, 0);
340 if (retval
== ERROR_OK
)
341 retval
= aarch64_set_dscr_bits(curr
, DSCR_HDE
, DSCR_HDE
);
342 if (retval
!= ERROR_OK
)
345 LOG_DEBUG("target %s prepared", target_name(curr
));
352 if (exc_target
&& first
)
/* Halt a single core: enable halting debug mode (DSCR.HDE), then pulse CTI
 * channel 0 to send the halt request. With HALT_SYNC, waits until PRSR
 * reports the halt (with timeout); HALT_LAZY returns immediately. */
361 static int aarch64_halt_one(struct target
*target
, enum halt_mode mode
)
363 int retval
= ERROR_OK
;
364 struct armv8_common
*armv8
= target_to_armv8(target
);
366 LOG_DEBUG("%s", target_name(target
));
368 /* allow Halting Debug Mode */
369 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
370 if (retval
!= ERROR_OK
)
373 /* trigger an event on channel 0, this outputs a halt request to the PE */
374 retval
= arm_cti_pulse_channel(armv8
->cti
, 0);
375 if (retval
!= ERROR_OK
)
378 if (mode
== HALT_SYNC
) {
379 retval
= aarch64_wait_halt_one(target
);
380 if (retval
!= ERROR_OK
) {
381 if (retval
== ERROR_TARGET_TIMEOUT
)
382 LOG_ERROR("Timeout waiting for target %s halt", target_name(target
));
/* Halt the whole SMP group: prepare every member, lazily halt one core (the
 * halt request then propagates through the CTM), then poll for up to one
 * second until all examined members report PRSR_HALT. If a core has not
 * halted, an explicit lazy halt is issued to it (Hi6220 cluster workaround
 * described in the comment below). */
390 static int aarch64_halt_smp(struct target
*target
, bool exc_target
)
392 struct target
*next
= target
;
395 /* prepare halt on all PEs of the group */
396 retval
= aarch64_prepare_halt_smp(target
, exc_target
, &next
);
398 if (exc_target
&& next
== target
)
401 /* halt the target PE */
402 if (retval
== ERROR_OK
)
403 retval
= aarch64_halt_one(next
, HALT_LAZY
);
405 if (retval
!= ERROR_OK
)
408 /* wait for all PEs to halt */
409 int64_t then
= timeval_ms();
411 bool all_halted
= true;
412 struct target_list
*head
;
415 foreach_smp_target(head
, target
->head
) {
420 if (!target_was_examined(curr
))
423 retval
= aarch64_check_state_one(curr
, PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
424 if (retval
!= ERROR_OK
|| !halted
) {
433 if (timeval_ms() > then
+ 1000) {
434 retval
= ERROR_TARGET_TIMEOUT
;
439 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
440 * and it looks like the CTI's are not connected by a common
441 * trigger matrix. It seems that we need to halt one core in each
442 * cluster explicitly. So if we find that a core has not halted
443 * yet, we trigger an explicit halt for the second cluster.
445 retval
= aarch64_halt_one(curr
, HALT_LAZY
)
;
446 if (retval
!= ERROR_OK
)
/* After a halt event, bring the rest of the SMP group into a consistent
 * halted state: if the group wasn't deliberately halted, halt it now; then
 * poll every member except the one GDB is attached to, and finally poll the
 * GDB-serving target last so its state update happens after the others. */
453 static int update_halt_gdb(struct target
*target
, enum target_debug_reason debug_reason
)
455 struct target
*gdb_target
= NULL
;
456 struct target_list
*head
;
459 if (debug_reason
== DBG_REASON_NOTHALTED
) {
460 LOG_DEBUG("Halting remaining targets in SMP group");
461 aarch64_halt_smp(target
, true);
464 /* poll all targets in the group, but skip the target that serves GDB */
465 foreach_smp_target(head
, target
->head
) {
467 /* skip calling context */
470 if (!target_was_examined(curr
))
472 /* skip targets that were already halted */
473 if (curr
->state
== TARGET_HALTED
)
475 /* remember the gdb_service->target */
476 if (curr
->gdb_service
!= NULL
)
477 gdb_target
= curr
->gdb_service
->target
;
479 if (curr
== gdb_target
)
482 /* avoid recursion in aarch64_poll() */
488 /* after all targets were updated, poll the gdb serving target */
489 if (gdb_target
!= NULL
&& gdb_target
!= target
)
490 aarch64_poll(gdb_target
);
/* Poll handler: sample PRSR once; on a transition into the halted state,
 * record TARGET_HALTED, run the debug-entry sequence, propagate the halt to
 * the rest of the SMP group, and fire the appropriate halted/debug-halted
 * event callbacks based on the previous state. Otherwise mark RUNNING. */
496 * Aarch64 Run control
499 static int aarch64_poll(struct target
*target
)
501 enum target_state prev_target_state
;
502 int retval
= ERROR_OK
;
505 retval
= aarch64_check_state_one(target
,
506 PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
507 if (retval
!= ERROR_OK
)
511 prev_target_state
= target
->state
;
512 if (prev_target_state
!= TARGET_HALTED
) {
513 enum target_debug_reason debug_reason
= target
->debug_reason
;
515 /* We have a halting debug event */
516 target
->state
= TARGET_HALTED
;
517 LOG_DEBUG("Target %s halted", target_name(target
));
518 retval
= aarch64_debug_entry(target
);
519 if (retval
!= ERROR_OK
)
523 update_halt_gdb(target
, debug_reason
);
525 switch (prev_target_state
) {
529 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
531 case TARGET_DEBUG_RUNNING
:
532 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_HALTED
);
539 target
->state
= TARGET_RUNNING
;
/* Target-API halt entry point: halt the whole SMP group when in SMP mode,
 * otherwise synchronously halt this single core. (The condition selecting
 * between the two paths was dropped by the extraction.) */
544 static int aarch64_halt(struct target
*target
)
547 return aarch64_halt_smp(target
, false);
549 return aarch64_halt_one(target
, HALT_SYNC
);
/* Restore one core's context before resume: free working areas (unless
 * debug_execution), compute the resume PC (current PC or caller-supplied
 * *address, which is updated with the value used), align it per core state
 * (AArch32 word / AArch64 word / Thumb bit-0), write it back to the PC
 * register cache, then restore SCTLR and the dirty register context. */
552 static int aarch64_restore_one(struct target
*target
, int current
,
553 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
555 struct armv8_common
*armv8
= target_to_armv8(target
);
556 struct arm
*arm
= &armv8
->arm
;
560 LOG_DEBUG("%s", target_name(target
));
562 if (!debug_execution
)
563 target_free_all_working_areas(target
);
565 /* current = 1: continue on current pc, otherwise continue at <address> */
566 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
568 resume_pc
= *address
;
570 *address
= resume_pc
;
572 /* Make sure that the Armv7 gdb thumb fixups does not
573 * kill the return address
575 switch (arm
->core_state
) {
577 resume_pc
&= 0xFFFFFFFC;
579 case ARM_STATE_AARCH64
:
580 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
582 case ARM_STATE_THUMB
:
583 case ARM_STATE_THUMB_EE
:
584 /* When the return address is loaded into PC
585 * bit 0 must be 1 to stay in Thumb state
589 case ARM_STATE_JAZELLE
:
590 LOG_ERROR("How do I resume into Jazelle state??");
593 LOG_DEBUG("resume pc = 0x%016" PRIx64
, resume_pc
);
594 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
598 /* called it now before restoring context because it uses cpu
599 * register r0 for restoring system control register */
600 retval
= aarch64_restore_system_control_reg(target
);
601 if (retval
== ERROR_OK
)
602 retval
= aarch64_restore_context(target
, handle_breakpoints
);
/* Prepare one core for restart: sanity-check DSCR (ITE set, ERR clear),
 * acknowledge any pending CTI halt event, ungate CTI channel 1 (restart)
 * while gating channel 0 (halt), force DSCR.HDE, and read PRSR to clear
 * its sticky bits so PRSR.SDR starts at 0 for the resume detection. */
608 * prepare single target for restart
612 static int aarch64_prepare_restart_one(struct target
*target
)
614 struct armv8_common
*armv8
= target_to_armv8(target
);
619 LOG_DEBUG("%s", target_name(target
));
621 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
622 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
623 if (retval
!= ERROR_OK
)
626 if ((dscr
& DSCR_ITE
) == 0)
627 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
628 if ((dscr
& DSCR_ERR
) != 0)
629 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
631 /* acknowledge a pending CTI halt event */
632 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
634 * open the CTI gate for channel 1 so that the restart events
635 * get passed along to all PEs. Also close gate for channel 0
636 * to isolate the PE from halt events.
638 if (retval
== ERROR_OK
)
639 retval
= arm_cti_ungate_channel(armv8
->cti
, 1);
640 if (retval
== ERROR_OK
)
641 retval
= arm_cti_gate_channel(armv8
->cti
, 0);
643 /* make sure that DSCR.HDE is set */
644 if (retval
== ERROR_OK
) {
646 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
647 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
650 if (retval
== ERROR_OK
) {
651 /* clear sticky bits in PRSR, SDR is now 0 */
652 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
653 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &tmp
);
/* Issue the actual restart: pulse CTI channel 1. With RESTART_SYNC, poll
 * PRSR.SDR for up to one second to confirm the core left debug state; on
 * success mark the target RUNNING with DBG_REASON_NOTHALTED. */
659 static int aarch64_do_restart_one(struct target
*target
, enum restart_mode mode
)
661 struct armv8_common
*armv8
= target_to_armv8(target
);
664 LOG_DEBUG("%s", target_name(target
));
666 /* trigger an event on channel 1, generates a restart request to the PE */
667 retval
= arm_cti_pulse_channel(armv8
->cti
, 1);
668 if (retval
!= ERROR_OK
)
671 if (mode
== RESTART_SYNC
) {
672 int64_t then
= timeval_ms();
676 * if PRSR.SDR is set now, the target did restart, even
677 * if it's now already halted again (e.g. due to breakpoint)
679 retval
= aarch64_check_state_one(target
,
680 PRSR_SDR
, PRSR_SDR
, &resumed
, NULL
);
681 if (retval
!= ERROR_OK
|| resumed
)
684 if (timeval_ms() > then
+ 1000) {
/* NOTE(review): format-string bug — the string concatenates a stray PRIx32
 * with no matching '%' conversion, so "PRIx32" text is appended and the
 * target_name() argument has no conversion consuming it correctly.
 * Should be: LOG_ERROR("%s: Timeout waiting for resume", target_name(target)); */
685 LOG_ERROR("%s: Timeout waiting for resume"PRIx32
, target_name(target
));
686 retval
= ERROR_TARGET_TIMEOUT
;
692 if (retval
!= ERROR_OK
)
695 target
->debug_reason
= DBG_REASON_NOTHALTED
;
696 target
->state
= TARGET_RUNNING
;
/* Convenience wrapper: prepare one core for restart, then restart it in the
 * requested mode (sync or lazy). */
701 static int aarch64_restart_one(struct target
*target
, enum restart_mode mode
)
705 LOG_DEBUG("%s", target_name(target
));
707 retval
= aarch64_prepare_restart_one(target
);
708 if (retval
== ERROR_OK
)
709 retval
= aarch64_do_restart_one(target
, mode
);
/* Restore context and prepare for restart on every halted, examined member
 * of the SMP group except the calling target. Each member is restored at
 * its current PC (current=1), not in step mode. *p_first receives the first
 * successfully prepared member (assignment dropped by the extraction;
 * 'first' is declared below and implied by the trailing comment). */
715 * prepare all but the current target for restart
717 static int aarch64_prep_restart_smp(struct target
*target
, int handle_breakpoints
, struct target
**p_first
)
719 int retval
= ERROR_OK
;
720 struct target_list
*head
;
721 struct target
*first
= NULL
;
724 foreach_smp_target(head
, target
->head
) {
725 struct target
*curr
= head
->target
;
727 /* skip calling target */
730 if (!target_was_examined(curr
))
732 if (curr
->state
!= TARGET_HALTED
)
735 /* resume at current address, not in step mode */
736 retval
= aarch64_restore_one(curr
, 1, &address
, handle_breakpoints
, 0);
737 if (retval
== ERROR_OK
)
738 retval
= aarch64_prepare_restart_one(curr
);
739 if (retval
!= ERROR_OK
) {
740 LOG_ERROR("failed to restore target %s", target_name(curr
));
743 /* remember the first valid target in the group */
/* While single-stepping one core, restart all the OTHER members of the SMP
 * group: prepare them, lazily restart the first one (the restart propagates
 * via CTI channel 1), then poll for up to one second until each member
 * reports PRSR.SDR (or is halted), updating its state/event callbacks.
 * Includes the Hi6220 per-cluster explicit-restart workaround. */
755 static int aarch64_step_restart_smp(struct target
*target
)
757 int retval
= ERROR_OK
;
758 struct target_list
*head
;
759 struct target
*first
= NULL
;
761 LOG_DEBUG("%s", target_name(target
));
763 retval
= aarch64_prep_restart_smp(target
, 0, &first
);
764 if (retval
!= ERROR_OK
)
768 retval
= aarch64_do_restart_one(first
, RESTART_LAZY
);
769 if (retval
!= ERROR_OK
) {
770 LOG_DEBUG("error restarting target %s", target_name(first
));
774 int64_t then
= timeval_ms();
776 struct target
*curr
= target
;
777 bool all_resumed
= true;
779 foreach_smp_target(head
, target
->head
) {
788 if (!target_was_examined(curr
))
791 retval
= aarch64_check_state_one(curr
,
792 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
793 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
798 if (curr
->state
!= TARGET_RUNNING
) {
799 curr
->state
= TARGET_RUNNING
;
800 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
801 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
808 if (timeval_ms() > then
+ 1000) {
809 LOG_ERROR("%s: timeout waiting for target resume", __func__
);
810 retval
= ERROR_TARGET_TIMEOUT
;
814 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
815 * and it looks like the CTI's are not connected by a common
816 * trigger matrix. It seems that we need to halt one core in each
817 * cluster explicitly. So if we find that a core has not halted
818 * yet, we trigger an explicit resume for the second cluster.
820 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
821 if (retval
!= ERROR_OK
)
/* Target-API resume: requires a halted target. In SMP mode, first prepares
 * the other group members, then restores and synchronously restarts this
 * core; afterwards polls the group (up to one second) until every member
 * reports PRSR.SDR, with the Hi6220 explicit-restart workaround. Finally
 * sets RUNNING or DEBUG_RUNNING state and fires the matching event. */
828 static int aarch64_resume(struct target
*target
, int current
,
829 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
832 uint64_t addr
= address
;
834 if (target
->state
!= TARGET_HALTED
)
835 return ERROR_TARGET_NOT_HALTED
;
838 * If this target is part of a SMP group, prepare the others
839 * targets for resuming. This involves restoring the complete
840 * target register context and setting up CTI gates to accept
841 * resume events from the trigger matrix.
844 retval
= aarch64_prep_restart_smp(target
, handle_breakpoints
, NULL
);
845 if (retval
!= ERROR_OK
)
849 /* all targets prepared, restore and restart the current target */
850 retval
= aarch64_restore_one(target
, current
, &addr
, handle_breakpoints
,
852 if (retval
== ERROR_OK
)
853 retval
= aarch64_restart_one(target
, RESTART_SYNC
);
854 if (retval
!= ERROR_OK
)
858 int64_t then
= timeval_ms();
860 struct target
*curr
= target
;
861 struct target_list
*head
;
862 bool all_resumed
= true;
864 foreach_smp_target(head
, target
->head
) {
871 if (!target_was_examined(curr
))
874 retval
= aarch64_check_state_one(curr
,
875 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
876 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
881 if (curr
->state
!= TARGET_RUNNING
) {
882 curr
->state
= TARGET_RUNNING
;
883 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
884 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
891 if (timeval_ms() > then
+ 1000) {
892 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__
, target_name(curr
));
893 retval
= ERROR_TARGET_TIMEOUT
;
898 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
899 * and it looks like the CTI's are not connected by a common
900 * trigger matrix. It seems that we need to halt one core in each
901 * cluster explicitly. So if we find that a core has not halted
902 * yet, we trigger an explicit resume for the second cluster.
904 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
905 if (retval
!= ERROR_OK
)
910 if (retval
!= ERROR_OK
)
913 target
->debug_reason
= DBG_REASON_NOTHALTED
;
915 if (!debug_execution
) {
916 target
->state
= TARGET_RUNNING
;
917 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
918 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
920 target
->state
= TARGET_DEBUG_RUNNING
;
921 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
922 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
/* Debug-state entry sequence after a halt: clear sticky errors (DRCR.CSE),
 * read DSCR, ack the CTI halt event, select AArch32/AArch64 opcode and
 * register access based on the core state, close the CTI gate, sync CPSR,
 * decode the debug reason from DSCR, read WFAR on a watchpoint hit, read
 * the current registers, and run the post_debug_entry hook if present. */
928 static int aarch64_debug_entry(struct target
*target
)
930 int retval
= ERROR_OK
;
931 struct armv8_common
*armv8
= target_to_armv8(target
);
932 struct arm_dpm
*dpm
= &armv8
->dpm
;
933 enum arm_state core_state
;
936 /* make sure to clear all sticky errors */
937 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
938 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
939 if (retval
== ERROR_OK
)
940 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
941 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
942 if (retval
== ERROR_OK
)
943 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
945 if (retval
!= ERROR_OK
)
948 LOG_DEBUG("%s dscr = 0x%08" PRIx32
, target_name(target
), dscr
);
951 core_state
= armv8_dpm_get_core_state(dpm
);
952 armv8_select_opcodes(armv8
, core_state
== ARM_STATE_AARCH64
);
953 armv8_select_reg_access(armv8
, core_state
== ARM_STATE_AARCH64
);
955 /* close the CTI gate for all events */
956 if (retval
== ERROR_OK
)
957 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
958 /* discard async exceptions */
959 if (retval
== ERROR_OK
)
960 retval
= dpm
->instr_cpsr_sync(dpm
);
961 if (retval
!= ERROR_OK
)
964 /* Examine debug reason */
965 armv8_dpm_report_dscr(dpm
, dscr
);
967 /* save address of instruction that triggered the watchpoint? */
968 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
972 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
973 armv8
->debug_base
+ CPUV8_DBG_WFAR1
,
975 if (retval
!= ERROR_OK
)
979 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
980 armv8
->debug_base
+ CPUV8_DBG_WFAR0
,
982 if (retval
!= ERROR_OK
)
985 armv8_dpm_report_wfar(&armv8
->dpm
, wfar
);
988 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
990 if (retval
== ERROR_OK
&& armv8
->post_debug_entry
)
991 retval
= armv8
->post_debug_entry(target
);
/* Post-debug-entry hook: read the SCTLR appropriate for the current mode
 * (MRS at EL1/EL2/EL3, or CP15 c1 MRC for AArch32) into the cached values,
 * identify the cache/MPIDR on first entry, and update the mmu/d-cache/
 * i-cache enabled flags from SCTLR bits M (0x1), C (0x4) and I (0x1000). */
996 static int aarch64_post_debug_entry(struct target
*target
)
998 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
999 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1001 enum arm_mode target_mode
= ARM_MODE_ANY
;
1004 switch (armv8
->arm
.core_mode
) {
1006 target_mode
= ARMV8_64_EL1H
;
1010 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL1
, 0);
1014 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL2
, 0);
1018 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL3
, 0);
1025 instr
= ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1029 LOG_INFO("cannot read system control register in this mode");
1033 if (target_mode
!= ARM_MODE_ANY
)
1034 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
1036 retval
= armv8
->dpm
.instr_read_data_r0(&armv8
->dpm
, instr
, &aarch64
->system_control_reg
);
1037 if (retval
!= ERROR_OK
)
1040 if (target_mode
!= ARM_MODE_ANY
)
1041 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
1043 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
1044 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
1046 if (armv8
->armv8_mmu
.armv8_cache
.info
== -1) {
1047 armv8_identify_cache(armv8
);
1048 armv8_read_mpidr(armv8
);
1051 armv8
->armv8_mmu
.mmu_enabled
=
1052 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
1053 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
1054 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
1055 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
1056 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
/* Single-step: requires a halted target. Saves EDECR, sets EDECR.SS (bit 2)
 * to arm hardware stepping, optionally masks interrupts (DSCR bits 22-23
 * when AARCH64_ISRMASK_ON), in SMP isolates this core from CTI channel 1
 * and restarts the rest of the group, then restores/restarts this core
 * lazily and polls (100 ms) for PRSR.SDR|PRSR.HALT. A timeout triggers an
 * explicit synchronous halt (Renesas R8A7795 WFI workaround described
 * below). Finally restores EDECR and the interrupt mask, and polls. */
1061 * single-step a target
1063 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
1064 int handle_breakpoints
)
1066 struct armv8_common
*armv8
= target_to_armv8(target
);
1067 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1068 int saved_retval
= ERROR_OK
;
1072 if (target
->state
!= TARGET_HALTED
) {
1073 LOG_WARNING("target not halted");
1074 return ERROR_TARGET_NOT_HALTED
;
1077 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1078 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
1079 /* make sure EDECR.SS is not set when restoring the register */
1081 if (retval
== ERROR_OK
) {
1083 /* set EDECR.SS to enter hardware step mode */
1084 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1085 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (edecr
|0x4));
1087 /* disable interrupts while stepping */
1088 if (retval
== ERROR_OK
&& aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
)
1089 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0x3 << 22);
1090 /* bail out if stepping setup has failed */
1091 if (retval
!= ERROR_OK
)
1094 if (target
->smp
&& (current
== 1)) {
1096 * isolate current target so that it doesn't get resumed
1097 * together with the others
1099 retval
= arm_cti_gate_channel(armv8
->cti
, 1);
1100 /* resume all other targets in the group */
1101 if (retval
== ERROR_OK
)
1102 retval
= aarch64_step_restart_smp(target
);
1103 if (retval
!= ERROR_OK
) {
1104 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1107 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1110 /* all other targets running, restore and restart the current target */
1111 retval
= aarch64_restore_one(target
, current
, &address
, 0, 0);
1112 if (retval
== ERROR_OK
)
1113 retval
= aarch64_restart_one(target
, RESTART_LAZY
);
1115 if (retval
!= ERROR_OK
)
1118 LOG_DEBUG("target step-resumed at 0x%" PRIx64
, address
);
1119 if (!handle_breakpoints
)
1120 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1122 int64_t then
= timeval_ms();
1127 retval
= aarch64_check_state_one(target
,
1128 PRSR_SDR
|PRSR_HALT
, PRSR_SDR
|PRSR_HALT
, &stepped
, &prsr
);
1129 if (retval
!= ERROR_OK
|| stepped
)
1132 if (timeval_ms() > then
+ 100) {
1133 LOG_ERROR("timeout waiting for target %s halt after step",
1134 target_name(target
));
1135 retval
= ERROR_TARGET_TIMEOUT
;
1141 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1142 * causes a timeout. The core takes the step but doesn't complete it and so
1143 * debug state is never entered. However, you can manually halt the core
1144 * as an external debug even is also a WFI wakeup event.
1146 if (retval
== ERROR_TARGET_TIMEOUT
)
1147 saved_retval
= aarch64_halt_one(target
, HALT_SYNC
);
1150 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1151 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
1152 if (retval
!= ERROR_OK
)
1155 /* restore interrupts */
1156 if (aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
) {
1157 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0);
1158 if (retval
!= ERROR_OK
)
1162 if (saved_retval
!= ERROR_OK
)
1163 return saved_retval
;
1165 return aarch64_poll(target
);
/* Write all dirty registers back to the core (optionally re-arming
 * breakpoints/watchpoints via bpwp), after running the pre_restore_context
 * hook; on success invalidates both register caches (AArch32 + AArch64). */
1168 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
1170 struct armv8_common
*armv8
= target_to_armv8(target
);
1171 struct arm
*arm
= &armv8
->arm
;
1175 LOG_DEBUG("%s", target_name(target
));
1177 if (armv8
->pre_restore_context
)
1178 armv8
->pre_restore_context(target
);
1180 retval
= armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
1181 if (retval
== ERROR_OK
) {
1182 /* registers are now invalid */
1183 register_cache_invalidate(arm
->core_cache
);
1184 register_cache_invalidate(arm
->core_cache
->next
);
/* Set a breakpoint. BKPT_HARD: claim a free breakpoint register pair, build
 * the BCR control word (matchmode, byte-address-select), and program
 * BVR (low+high word) and BCR over the debug AP. BKPT_SOFT: save the
 * original instruction and patch in an HLT opcode, with d-cache flush and
 * i-cache invalidate around the write. Always forces DSCR.HDE at the end. */
1191 * Cortex-A8 Breakpoint and watchpoint functions
1194 /* Setup hardware Breakpoint Register Pair */
1195 static int aarch64_set_breakpoint(struct target
*target
,
1196 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1201 uint8_t byte_addr_select
= 0x0F;
1202 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1203 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1204 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1206 if (breakpoint
->set
) {
1207 LOG_WARNING("breakpoint already set");
1211 if (breakpoint
->type
== BKPT_HARD
) {
/* NOTE(review): the loop below reads brp_list[brp_i].used BEFORE checking
 * brp_i < aarch64->brp_num — when all pairs are used this reads one element
 * past the array. The bounds check should come first:
 * while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used) */
1213 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
1215 if (brp_i
>= aarch64
->brp_num
) {
1216 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1217 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1219 breakpoint
->set
= brp_i
+ 1;
1220 if (breakpoint
->length
== 2)
1221 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1222 control
= ((matchmode
& 0x7) << 20)
1224 | (byte_addr_select
<< 5)
1226 brp_list
[brp_i
].used
= 1;
1227 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1228 brp_list
[brp_i
].control
= control
;
1229 bpt_value
= brp_list
[brp_i
].value
;
1231 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1232 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1233 (uint32_t)(bpt_value
& 0xFFFFFFFF));
1234 if (retval
!= ERROR_OK
)
1236 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1237 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1238 (uint32_t)(bpt_value
>> 32));
1239 if (retval
!= ERROR_OK
)
1242 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1243 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1244 brp_list
[brp_i
].control
);
1245 if (retval
!= ERROR_OK
)
1247 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1248 brp_list
[brp_i
].control
,
1249 brp_list
[brp_i
].value
);
1251 } else if (breakpoint
->type
== BKPT_SOFT
) {
1254 buf_set_u32(code
, 0, 32, armv8_opcode(armv8
, ARMV8_OPC_HLT
));
1255 retval
= target_read_memory(target
,
1256 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1257 breakpoint
->length
, 1,
1258 breakpoint
->orig_instr
);
1259 if (retval
!= ERROR_OK
)
1262 armv8_cache_d_inner_flush_virt(armv8
,
1263 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1264 breakpoint
->length
);
1266 retval
= target_write_memory(target
,
1267 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1268 breakpoint
->length
, 1, code
);
1269 if (retval
!= ERROR_OK
)
1272 armv8_cache_d_inner_flush_virt(armv8
,
1273 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1274 breakpoint
->length
);
1276 armv8_cache_i_inner_inval_virt(armv8
,
1277 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1278 breakpoint
->length
);
1280 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1283 /* Ensure that halting debug mode is enable */
1284 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
1285 if (retval
!= ERROR_OK
) {
1286 LOG_DEBUG("Failed to set DSCR.HDE");
/* Set a context-ID breakpoint: find a free BRP of type BRP_CONTEXT, build
 * the control word, store the ASID as the compare value, and program
 * BVR/BCR for that pair over the debug AP. */
1293 static int aarch64_set_context_breakpoint(struct target
*target
,
1294 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1296 int retval
= ERROR_FAIL
;
1299 uint8_t byte_addr_select
= 0x0F;
1300 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1301 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1302 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1304 if (breakpoint
->set
) {
1305 LOG_WARNING("breakpoint already set");
1308 /*check available context BRPs*/
/* NOTE(review): same out-of-bounds hazard as aarch64_set_breakpoint — the
 * array element is read before brp_i is checked against brp_num; the bounds
 * test should be evaluated first. */
1309 while ((brp_list
[brp_i
].used
||
1310 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
1313 if (brp_i
>= aarch64
->brp_num
) {
1314 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1318 breakpoint
->set
= brp_i
+ 1;
1319 control
= ((matchmode
& 0x7) << 20)
1321 | (byte_addr_select
<< 5)
1323 brp_list
[brp_i
].used
= 1;
1324 brp_list
[brp_i
].value
= (breakpoint
->asid
);
1325 brp_list
[brp_i
].control
= control
;
1326 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1327 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1328 brp_list
[brp_i
].value
);
1329 if (retval
!= ERROR_OK
)
1331 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1332 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1333 brp_list
[brp_i
].control
);
1334 if (retval
!= ERROR_OK
)
1336 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1337 brp_list
[brp_i
].control
,
1338 brp_list
[brp_i
].value
);
1343 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1345 int retval
= ERROR_FAIL
;
1346 int brp_1
= 0; /* holds the contextID pair */
1347 int brp_2
= 0; /* holds the IVA pair */
1348 uint32_t control_CTX
, control_IVA
;
1349 uint8_t CTX_byte_addr_select
= 0x0F;
1350 uint8_t IVA_byte_addr_select
= 0x0F;
1351 uint8_t CTX_machmode
= 0x03;
1352 uint8_t IVA_machmode
= 0x01;
1353 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1354 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1355 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1357 if (breakpoint
->set
) {
1358 LOG_WARNING("breakpoint already set");
1361 /*check available context BRPs*/
1362 while ((brp_list
[brp_1
].used
||
1363 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
1366 printf("brp(CTX) found num: %d\n", brp_1
);
1367 if (brp_1
>= aarch64
->brp_num
) {
1368 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1372 while ((brp_list
[brp_2
].used
||
1373 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
1376 printf("brp(IVA) found num: %d\n", brp_2
);
1377 if (brp_2
>= aarch64
->brp_num
) {
1378 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1382 breakpoint
->set
= brp_1
+ 1;
1383 breakpoint
->linked_BRP
= brp_2
;
1384 control_CTX
= ((CTX_machmode
& 0x7) << 20)
1387 | (CTX_byte_addr_select
<< 5)
1389 brp_list
[brp_1
].used
= 1;
1390 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1391 brp_list
[brp_1
].control
= control_CTX
;
1392 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1393 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1394 brp_list
[brp_1
].value
);
1395 if (retval
!= ERROR_OK
)
1397 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1398 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1399 brp_list
[brp_1
].control
);
1400 if (retval
!= ERROR_OK
)
1403 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1406 | (IVA_byte_addr_select
<< 5)
1408 brp_list
[brp_2
].used
= 1;
1409 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1410 brp_list
[brp_2
].control
= control_IVA
;
1411 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1412 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1413 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1414 if (retval
!= ERROR_OK
)
1416 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1417 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].BRPn
,
1418 brp_list
[brp_2
].value
>> 32);
1419 if (retval
!= ERROR_OK
)
1421 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1422 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1423 brp_list
[brp_2
].control
);
1424 if (retval
!= ERROR_OK
)
1430 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1433 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1434 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1435 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1437 if (!breakpoint
->set
) {
1438 LOG_WARNING("breakpoint not set");
1442 if (breakpoint
->type
== BKPT_HARD
) {
1443 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1444 int brp_i
= breakpoint
->set
- 1;
1445 int brp_j
= breakpoint
->linked_BRP
;
1446 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1447 LOG_DEBUG("Invalid BRP number in breakpoint");
1450 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1451 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1452 brp_list
[brp_i
].used
= 0;
1453 brp_list
[brp_i
].value
= 0;
1454 brp_list
[brp_i
].control
= 0;
1455 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1456 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1457 brp_list
[brp_i
].control
);
1458 if (retval
!= ERROR_OK
)
1460 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1461 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1462 (uint32_t)brp_list
[brp_i
].value
);
1463 if (retval
!= ERROR_OK
)
1465 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1466 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1467 (uint32_t)brp_list
[brp_i
].value
);
1468 if (retval
!= ERROR_OK
)
1470 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1471 LOG_DEBUG("Invalid BRP number in breakpoint");
1474 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1475 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1476 brp_list
[brp_j
].used
= 0;
1477 brp_list
[brp_j
].value
= 0;
1478 brp_list
[brp_j
].control
= 0;
1479 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1480 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1481 brp_list
[brp_j
].control
);
1482 if (retval
!= ERROR_OK
)
1484 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1485 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1486 (uint32_t)brp_list
[brp_j
].value
);
1487 if (retval
!= ERROR_OK
)
1489 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1490 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_j
].BRPn
,
1491 (uint32_t)brp_list
[brp_j
].value
);
1492 if (retval
!= ERROR_OK
)
1495 breakpoint
->linked_BRP
= 0;
1496 breakpoint
->set
= 0;
1500 int brp_i
= breakpoint
->set
- 1;
1501 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1502 LOG_DEBUG("Invalid BRP number in breakpoint");
1505 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1506 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1507 brp_list
[brp_i
].used
= 0;
1508 brp_list
[brp_i
].value
= 0;
1509 brp_list
[brp_i
].control
= 0;
1510 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1511 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1512 brp_list
[brp_i
].control
);
1513 if (retval
!= ERROR_OK
)
1515 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1516 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1517 brp_list
[brp_i
].value
);
1518 if (retval
!= ERROR_OK
)
1521 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1522 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1523 (uint32_t)brp_list
[brp_i
].value
);
1524 if (retval
!= ERROR_OK
)
1526 breakpoint
->set
= 0;
1530 /* restore original instruction (kept in target endianness) */
1532 armv8_cache_d_inner_flush_virt(armv8
,
1533 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1534 breakpoint
->length
);
1536 if (breakpoint
->length
== 4) {
1537 retval
= target_write_memory(target
,
1538 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1539 4, 1, breakpoint
->orig_instr
);
1540 if (retval
!= ERROR_OK
)
1543 retval
= target_write_memory(target
,
1544 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1545 2, 1, breakpoint
->orig_instr
);
1546 if (retval
!= ERROR_OK
)
1550 armv8_cache_d_inner_flush_virt(armv8
,
1551 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1552 breakpoint
->length
);
1554 armv8_cache_i_inner_inval_virt(armv8
,
1555 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1556 breakpoint
->length
);
1558 breakpoint
->set
= 0;
1563 static int aarch64_add_breakpoint(struct target
*target
,
1564 struct breakpoint
*breakpoint
)
1566 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1568 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1569 LOG_INFO("no hardware breakpoint available");
1570 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1573 if (breakpoint
->type
== BKPT_HARD
)
1574 aarch64
->brp_num_available
--;
1576 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1579 static int aarch64_add_context_breakpoint(struct target
*target
,
1580 struct breakpoint
*breakpoint
)
1582 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1584 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1585 LOG_INFO("no hardware breakpoint available");
1586 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1589 if (breakpoint
->type
== BKPT_HARD
)
1590 aarch64
->brp_num_available
--;
1592 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1595 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1596 struct breakpoint
*breakpoint
)
1598 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1600 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1601 LOG_INFO("no hardware breakpoint available");
1602 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1605 if (breakpoint
->type
== BKPT_HARD
)
1606 aarch64
->brp_num_available
--;
1608 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1612 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1614 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1617 /* It is perfectly possible to remove breakpoints while the target is running */
1618 if (target
->state
!= TARGET_HALTED
) {
1619 LOG_WARNING("target not halted");
1620 return ERROR_TARGET_NOT_HALTED
;
1624 if (breakpoint
->set
) {
1625 aarch64_unset_breakpoint(target
, breakpoint
);
1626 if (breakpoint
->type
== BKPT_HARD
)
1627 aarch64
->brp_num_available
++;
1634 * Cortex-A8 Reset functions
1637 static int aarch64_assert_reset(struct target
*target
)
1639 struct armv8_common
*armv8
= target_to_armv8(target
);
1643 /* FIXME when halt is requested, make it work somehow... */
1645 /* Issue some kind of warm reset. */
1646 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1647 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1648 else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1649 /* REVISIT handle "pulls" cases, if there's
1650 * hardware that needs them to work.
1652 jtag_add_reset(0, 1);
1654 LOG_ERROR("%s: how to reset?", target_name(target
));
1658 /* registers are now invalid */
1659 if (target_was_examined(target
)) {
1660 register_cache_invalidate(armv8
->arm
.core_cache
);
1661 register_cache_invalidate(armv8
->arm
.core_cache
->next
);
1664 target
->state
= TARGET_RESET
;
1669 static int aarch64_deassert_reset(struct target
*target
)
1675 /* be certain SRST is off */
1676 jtag_add_reset(0, 0);
1678 if (!target_was_examined(target
))
1681 retval
= aarch64_poll(target
);
1682 if (retval
!= ERROR_OK
)
1685 if (target
->reset_halt
) {
1686 if (target
->state
!= TARGET_HALTED
) {
1687 LOG_WARNING("%s: ran after reset and before halt ...",
1688 target_name(target
));
1689 retval
= target_halt(target
);
1690 if (retval
!= ERROR_OK
)
1695 return aarch64_init_debug_access(target
);
1698 static int aarch64_write_cpu_memory_slow(struct target
*target
,
1699 uint32_t size
, uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
1701 struct armv8_common
*armv8
= target_to_armv8(target
);
1702 struct arm_dpm
*dpm
= &armv8
->dpm
;
1703 struct arm
*arm
= &armv8
->arm
;
1706 armv8_reg_current(arm
, 1)->dirty
= true;
1708 /* change DCC to normal mode if necessary */
1709 if (*dscr
& DSCR_MA
) {
1711 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1712 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1713 if (retval
!= ERROR_OK
)
1718 uint32_t data
, opcode
;
1720 /* write the data to store into DTRRX */
1724 data
= target_buffer_get_u16(target
, buffer
);
1726 data
= target_buffer_get_u32(target
, buffer
);
1727 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1728 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
1729 if (retval
!= ERROR_OK
)
1732 if (arm
->core_state
== ARM_STATE_AARCH64
)
1733 retval
= dpm
->instr_execute(dpm
, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0
, 1));
1735 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
1736 if (retval
!= ERROR_OK
)
1740 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRB_IP
);
1742 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRH_IP
);
1744 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRW_IP
);
1745 retval
= dpm
->instr_execute(dpm
, opcode
);
1746 if (retval
!= ERROR_OK
)
1757 static int aarch64_write_cpu_memory_fast(struct target
*target
,
1758 uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
1760 struct armv8_common
*armv8
= target_to_armv8(target
);
1761 struct arm
*arm
= &armv8
->arm
;
1764 armv8_reg_current(arm
, 1)->dirty
= true;
1766 /* Step 1.d - Change DCC to memory mode */
1768 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1769 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1770 if (retval
!= ERROR_OK
)
1774 /* Step 2.a - Do the write */
1775 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
1776 buffer
, 4, count
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
1777 if (retval
!= ERROR_OK
)
1780 /* Step 3.a - Switch DTR mode back to Normal mode */
1782 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1783 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1784 if (retval
!= ERROR_OK
)
1790 static int aarch64_write_cpu_memory(struct target
*target
,
1791 uint64_t address
, uint32_t size
,
1792 uint32_t count
, const uint8_t *buffer
)
1794 /* write memory through APB-AP */
1795 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1796 struct armv8_common
*armv8
= target_to_armv8(target
);
1797 struct arm_dpm
*dpm
= &armv8
->dpm
;
1798 struct arm
*arm
= &armv8
->arm
;
1801 if (target
->state
!= TARGET_HALTED
) {
1802 LOG_WARNING("target not halted");
1803 return ERROR_TARGET_NOT_HALTED
;
1806 /* Mark register X0 as dirty, as it will be used
1807 * for transferring the data.
1808 * It will be restored automatically when exiting
1811 armv8_reg_current(arm
, 0)->dirty
= true;
1813 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1816 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1817 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1818 if (retval
!= ERROR_OK
)
1821 /* Set Normal access mode */
1822 dscr
= (dscr
& ~DSCR_MA
);
1823 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1824 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1825 if (retval
!= ERROR_OK
)
1828 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1829 /* Write X0 with value 'address' using write procedure */
1830 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1831 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1832 retval
= dpm
->instr_write_data_dcc_64(dpm
,
1833 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
1835 /* Write R0 with value 'address' using write procedure */
1836 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1837 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1838 retval
= dpm
->instr_write_data_dcc(dpm
,
1839 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
1842 if (retval
!= ERROR_OK
)
1845 if (size
== 4 && (address
% 4) == 0)
1846 retval
= aarch64_write_cpu_memory_fast(target
, count
, buffer
, &dscr
);
1848 retval
= aarch64_write_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
1850 if (retval
!= ERROR_OK
) {
1851 /* Unset DTR mode */
1852 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1853 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1855 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1856 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1859 /* Check for sticky abort flags in the DSCR */
1860 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1861 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1862 if (retval
!= ERROR_OK
)
1866 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1867 /* Abort occurred - clear it and exit */
1868 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1869 armv8_dpm_handle_exception(dpm
, true);
1877 static int aarch64_read_cpu_memory_slow(struct target
*target
,
1878 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
1880 struct armv8_common
*armv8
= target_to_armv8(target
);
1881 struct arm_dpm
*dpm
= &armv8
->dpm
;
1882 struct arm
*arm
= &armv8
->arm
;
1885 armv8_reg_current(arm
, 1)->dirty
= true;
1887 /* change DCC to normal mode (if necessary) */
1888 if (*dscr
& DSCR_MA
) {
1890 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1891 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1892 if (retval
!= ERROR_OK
)
1897 uint32_t opcode
, data
;
1900 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRB_IP
);
1902 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRH_IP
);
1904 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRW_IP
);
1905 retval
= dpm
->instr_execute(dpm
, opcode
);
1906 if (retval
!= ERROR_OK
)
1909 if (arm
->core_state
== ARM_STATE_AARCH64
)
1910 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0
, 1));
1912 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1913 if (retval
!= ERROR_OK
)
1916 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1917 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &data
);
1918 if (retval
!= ERROR_OK
)
1922 *buffer
= (uint8_t)data
;
1924 target_buffer_set_u16(target
, buffer
, (uint16_t)data
);
1926 target_buffer_set_u32(target
, buffer
, data
);
1936 static int aarch64_read_cpu_memory_fast(struct target
*target
,
1937 uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
1939 struct armv8_common
*armv8
= target_to_armv8(target
);
1940 struct arm_dpm
*dpm
= &armv8
->dpm
;
1941 struct arm
*arm
= &armv8
->arm
;
1945 /* Mark X1 as dirty */
1946 armv8_reg_current(arm
, 1)->dirty
= true;
1948 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1949 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1950 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0));
1952 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1953 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1956 if (retval
!= ERROR_OK
)
1959 /* Step 1.e - Change DCC to memory mode */
1961 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1962 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1963 if (retval
!= ERROR_OK
)
1966 /* Step 1.f - read DBGDTRTX and discard the value */
1967 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1968 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1969 if (retval
!= ERROR_OK
)
1973 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1974 * Abort flags are sticky, so can be read at end of transactions
1976 * This data is read in aligned to 32 bit boundary.
1980 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1981 * increments X0 by 4. */
1982 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, buffer
, 4, count
,
1983 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
1984 if (retval
!= ERROR_OK
)
1988 /* Step 3.a - set DTR access mode back to Normal mode */
1990 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1991 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1992 if (retval
!= ERROR_OK
)
1995 /* Step 3.b - read DBGDTRTX for the final value */
1996 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1997 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1998 if (retval
!= ERROR_OK
)
2001 target_buffer_set_u32(target
, buffer
+ count
* 4, value
);
2005 static int aarch64_read_cpu_memory(struct target
*target
,
2006 target_addr_t address
, uint32_t size
,
2007 uint32_t count
, uint8_t *buffer
)
2009 /* read memory through APB-AP */
2010 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2011 struct armv8_common
*armv8
= target_to_armv8(target
);
2012 struct arm_dpm
*dpm
= &armv8
->dpm
;
2013 struct arm
*arm
= &armv8
->arm
;
2016 LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64
" size %" PRIu32
" count %" PRIu32
,
2017 address
, size
, count
);
2019 if (target
->state
!= TARGET_HALTED
) {
2020 LOG_WARNING("target not halted");
2021 return ERROR_TARGET_NOT_HALTED
;
2024 /* Mark register X0 as dirty, as it will be used
2025 * for transferring the data.
2026 * It will be restored automatically when exiting
2029 armv8_reg_current(arm
, 0)->dirty
= true;
2032 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2033 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2034 if (retval
!= ERROR_OK
)
2037 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2039 /* Set Normal access mode */
2041 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2042 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2043 if (retval
!= ERROR_OK
)
2046 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2047 /* Write X0 with value 'address' using write procedure */
2048 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2049 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2050 retval
= dpm
->instr_write_data_dcc_64(dpm
,
2051 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
2053 /* Write R0 with value 'address' using write procedure */
2054 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2055 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2056 retval
= dpm
->instr_write_data_dcc(dpm
,
2057 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
2060 if (retval
!= ERROR_OK
)
2063 if (size
== 4 && (address
% 4) == 0)
2064 retval
= aarch64_read_cpu_memory_fast(target
, count
, buffer
, &dscr
);
2066 retval
= aarch64_read_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
2068 if (dscr
& DSCR_MA
) {
2070 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2071 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2074 if (retval
!= ERROR_OK
)
2077 /* Check for sticky abort flags in the DSCR */
2078 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2079 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2080 if (retval
!= ERROR_OK
)
2085 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
2086 /* Abort occurred - clear it and exit */
2087 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
2088 armv8_dpm_handle_exception(dpm
, true);
2096 static int aarch64_read_phys_memory(struct target
*target
,
2097 target_addr_t address
, uint32_t size
,
2098 uint32_t count
, uint8_t *buffer
)
2100 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2102 if (count
&& buffer
) {
2103 /* read memory through APB-AP */
2104 retval
= aarch64_mmu_modify(target
, 0);
2105 if (retval
!= ERROR_OK
)
2107 retval
= aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2112 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
2113 uint32_t size
, uint32_t count
, uint8_t *buffer
)
2115 int mmu_enabled
= 0;
2118 /* determine if MMU was enabled on target stop */
2119 retval
= aarch64_mmu(target
, &mmu_enabled
);
2120 if (retval
!= ERROR_OK
)
2124 /* enable MMU as we could have disabled it for phys access */
2125 retval
= aarch64_mmu_modify(target
, 1);
2126 if (retval
!= ERROR_OK
)
2129 return aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2132 static int aarch64_write_phys_memory(struct target
*target
,
2133 target_addr_t address
, uint32_t size
,
2134 uint32_t count
, const uint8_t *buffer
)
2136 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2138 if (count
&& buffer
) {
2139 /* write memory through APB-AP */
2140 retval
= aarch64_mmu_modify(target
, 0);
2141 if (retval
!= ERROR_OK
)
2143 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2149 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
2150 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
2152 int mmu_enabled
= 0;
2155 /* determine if MMU was enabled on target stop */
2156 retval
= aarch64_mmu(target
, &mmu_enabled
);
2157 if (retval
!= ERROR_OK
)
2161 /* enable MMU as we could have disabled it for phys access */
2162 retval
= aarch64_mmu_modify(target
, 1);
2163 if (retval
!= ERROR_OK
)
2166 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2169 static int aarch64_handle_target_request(void *priv
)
2171 struct target
*target
= priv
;
2172 struct armv8_common
*armv8
= target_to_armv8(target
);
2175 if (!target_was_examined(target
))
2177 if (!target
->dbg_msg_enabled
)
2180 if (target
->state
== TARGET_RUNNING
) {
2183 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2184 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2186 /* check if we have data */
2187 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
2188 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2189 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
2190 if (retval
== ERROR_OK
) {
2191 target_request(target
, request
);
2192 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2193 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2201 static int aarch64_examine_first(struct target
*target
)
2203 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2204 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2205 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2206 struct aarch64_private_config
*pc
;
2208 int retval
= ERROR_OK
;
2209 uint64_t debug
, ttypr
;
2211 uint32_t tmp0
, tmp1
, tmp2
, tmp3
;
2212 debug
= ttypr
= cpuid
= 0;
2214 /* Search for the APB-AB - it is needed for access to debug registers */
2215 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
2216 if (retval
!= ERROR_OK
) {
2217 LOG_ERROR("Could not find APB-AP for debug access");
2221 retval
= mem_ap_init(armv8
->debug_ap
);
2222 if (retval
!= ERROR_OK
) {
2223 LOG_ERROR("Could not initialize the APB-AP");
2227 armv8
->debug_ap
->memaccess_tck
= 10;
2229 if (!target
->dbgbase_set
) {
2231 /* Get ROM Table base */
2233 int32_t coreidx
= target
->coreid
;
2234 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
2235 if (retval
!= ERROR_OK
)
2237 /* Lookup 0x15 -- Processor DAP */
2238 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
2239 &armv8
->debug_base
, &coreidx
);
2240 if (retval
!= ERROR_OK
)
2242 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
2243 " apid: %08" PRIx32
, coreidx
, armv8
->debug_base
, apid
);
2245 armv8
->debug_base
= target
->dbgbase
;
2247 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2248 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
2249 if (retval
!= ERROR_OK
) {
2250 LOG_DEBUG("Examine %s failed", "oslock");
2254 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2255 armv8
->debug_base
+ CPUV8_DBG_MAINID0
, &cpuid
);
2256 if (retval
!= ERROR_OK
) {
2257 LOG_DEBUG("Examine %s failed", "CPUID");
2261 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2262 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
, &tmp0
);
2263 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2264 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
+ 4, &tmp1
);
2265 if (retval
!= ERROR_OK
) {
2266 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2269 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2270 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
, &tmp2
);
2271 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2272 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
+ 4, &tmp3
);
2273 if (retval
!= ERROR_OK
) {
2274 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2278 retval
= dap_run(armv8
->debug_ap
->dap
);
2279 if (retval
!= ERROR_OK
) {
2280 LOG_ERROR("%s: examination failed\n", target_name(target
));
2285 ttypr
= (ttypr
<< 32) | tmp0
;
2287 debug
= (debug
<< 32) | tmp2
;
2289 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
2290 LOG_DEBUG("ttypr = 0x%08" PRIx64
, ttypr
);
2291 LOG_DEBUG("debug = 0x%08" PRIx64
, debug
);
2293 if (target
->private_config
== NULL
)
2296 pc
= (struct aarch64_private_config
*)target
->private_config
;
2297 if (pc
->cti
== NULL
)
2300 armv8
->cti
= pc
->cti
;
2302 retval
= aarch64_dpm_setup(aarch64
, debug
);
2303 if (retval
!= ERROR_OK
)
2306 /* Setup Breakpoint Register Pairs */
2307 aarch64
->brp_num
= (uint32_t)((debug
>> 12) & 0x0F) + 1;
2308 aarch64
->brp_num_context
= (uint32_t)((debug
>> 28) & 0x0F) + 1;
2309 aarch64
->brp_num_available
= aarch64
->brp_num
;
2310 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
2311 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
2312 aarch64
->brp_list
[i
].used
= 0;
2313 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
2314 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
2316 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
2317 aarch64
->brp_list
[i
].value
= 0;
2318 aarch64
->brp_list
[i
].control
= 0;
2319 aarch64
->brp_list
[i
].BRPn
= i
;
2322 LOG_DEBUG("Configured %i hw breakpoints", aarch64
->brp_num
);
2324 target
->state
= TARGET_UNKNOWN
;
2325 target
->debug_reason
= DBG_REASON_NOTHALTED
;
2326 aarch64
->isrmasking_mode
= AARCH64_ISRMASK_ON
;
2327 target_set_examined(target
);
2331 static int aarch64_examine(struct target
*target
)
2333 int retval
= ERROR_OK
;
2335 /* don't re-probe hardware after each reset */
2336 if (!target_was_examined(target
))
2337 retval
= aarch64_examine_first(target
);
2339 /* Configure core debug access */
2340 if (retval
== ERROR_OK
)
2341 retval
= aarch64_init_debug_access(target
);
2347 * Cortex-A8 target creation and initialization
2350 static int aarch64_init_target(struct command_context
*cmd_ctx
,
2351 struct target
*target
)
2353 /* examine_first() does a bunch of this */
2357 static int aarch64_init_arch_info(struct target
*target
,
2358 struct aarch64_common
*aarch64
, struct adiv5_dap
*dap
)
2360 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2362 /* Setup struct aarch64_common */
2363 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
2364 armv8
->arm
.dap
= dap
;
2366 /* register arch-specific functions */
2367 armv8
->examine_debug_reason
= NULL
;
2368 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
2369 armv8
->pre_restore_context
= NULL
;
2370 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
2372 armv8_init_arch_info(target
, armv8
);
2373 target_register_timer_callback(aarch64_handle_target_request
, 1, 1, target
);
2378 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
2380 struct aarch64_private_config
*pc
= target
->private_config
;
2381 struct aarch64_common
*aarch64
= calloc(1, sizeof(struct aarch64_common
));
2383 if (adiv5_verify_config(&pc
->adiv5_config
) != ERROR_OK
)
2386 return aarch64_init_arch_info(target
, aarch64
, pc
->adiv5_config
.dap
);
2389 static int aarch64_mmu(struct target
*target
, int *enabled
)
2391 if (target
->state
!= TARGET_HALTED
) {
2392 LOG_ERROR("%s: target %s not halted", __func__
, target_name(target
));
2393 return ERROR_TARGET_INVALID
;
2396 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
2400 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
2401 target_addr_t
*phys
)
2403 return armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
2407 * private target configuration items
2409 enum aarch64_cfg_param
{
2413 static const Jim_Nvp nvp_config_opts
[] = {
2414 { .name
= "-cti", .value
= CFG_CTI
},
2415 { .name
= NULL
, .value
= -1 }
/*
 * Parse aarch64-specific "configure"/"cget" options from the Tcl target
 * command, after delegating common DAP options to adiv5_jim_configure().
 *
 * Returns JIM_OK when an option was consumed, JIM_ERR on a bad value,
 * and JIM_CONTINUE when the topmost option is not ours so the generic
 * target code can try it.
 */
static int aarch64_jim_configure(struct target *target, Jim_GetOptInfo *goi)
{
	struct aarch64_private_config *pc;
	Jim_Nvp *n;
	int e;

	/* Lazily allocate the private config on first use; it persists on
	 * target->private_config for aarch64_target_create() to consume. */
	pc = (struct aarch64_private_config *)target->private_config;
	if (pc == NULL) {
		pc = calloc(1, sizeof(struct aarch64_private_config));
		target->private_config = pc;
	}

	/*
	 * Call adiv5_jim_configure() to parse the common DAP options
	 * It will return JIM_CONTINUE if it didn't find any known
	 * options, JIM_OK if it correctly parsed the topmost option
	 * and JIM_ERR if an error occured during parameter evaluation.
	 * For JIM_CONTINUE, we check our own params.
	 */
	e = adiv5_jim_configure(target, goi);
	if (e != JIM_CONTINUE)
		return e;

	/* parse config or cget options ... */
	if (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);

		/* check first if topmost item is for us */
		e = Jim_Nvp_name2value_obj(goi->interp, nvp_config_opts,
			goi->argv[0], &n);
		if (e != JIM_OK)
			/* not one of our options: let generic code retry */
			return JIM_CONTINUE;

		/* consume the option name token */
		e = Jim_GetOpt_Obj(goi, NULL);
		if (e != JIM_OK)
			return e;

		switch (n->value) {
		case CFG_CTI: {
			if (goi->isconfigure) {
				/* "-cti <name>": look up and store the CTI instance */
				Jim_Obj *o_cti;
				struct arm_cti *cti;
				e = Jim_GetOpt_Obj(goi, &o_cti);
				if (e != JIM_OK)
					return e;
				cti = cti_instance_by_jim_obj(goi->interp, o_cti);
				if (cti == NULL) {
					Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
					return JIM_ERR;
				}
				pc->cti = cti;
			} else {
				/* cget path: report the configured CTI name */
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
						goi->argc, goi->argv,
						"NO PARAMS");
					return JIM_ERR;
				}

				if (pc == NULL || pc->cti == NULL) {
					Jim_SetResultString(goi->interp, "CTI not configured", -1);
					return JIM_ERR;
				}
				Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
			}
			break;
		}

		default:
			return JIM_CONTINUE;
		}
	}

	return JIM_CONTINUE;
}
2494 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
2496 struct target
*target
= get_current_target(CMD_CTX
);
2497 struct armv8_common
*armv8
= target_to_armv8(target
);
2499 return armv8_handle_cache_info_command(CMD_CTX
,
2500 &armv8
->armv8_mmu
.armv8_cache
);
2504 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
2506 struct target
*target
= get_current_target(CMD_CTX
);
2507 if (!target_was_examined(target
)) {
2508 LOG_ERROR("target not examined yet");
2512 return aarch64_init_debug_access(target
);
/*
 * "aarch64 smp_off": disable SMP handling by clearing the smp flag on
 * every target in this target's SMP list.
 */
COMMAND_HANDLER(aarch64_handle_smp_off_command)
{
	struct target *target = get_current_target(CMD_CTX);
	/* check target is an smp target */
	struct target_list *head;
	struct target *curr;
	head = target->head;
	target->smp = 0;
	if (head != (struct target_list *)NULL) {
		/* walk the whole SMP list, turning smp off on each member */
		while (head != (struct target_list *)NULL) {
			curr = head->target;
			curr->smp = 0;
			head = head->next;
		}
		/* fixes the target display to the debugger */
		target->gdb_service->target = target;
	}
	return ERROR_OK;
}
/*
 * "aarch64 smp_on": re-enable SMP handling by setting the smp flag on
 * this target and every member of its SMP list.
 */
COMMAND_HANDLER(aarch64_handle_smp_on_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct target_list *head;
	struct target *curr;
	head = target->head;
	if (head != (struct target_list *)NULL) {
		target->smp = 1;
		/* propagate the smp flag to every target in the list */
		while (head != (struct target_list *)NULL) {
			curr = head->target;
			curr->smp = 1;
			head = head->next;
		}
	}
	return ERROR_OK;
}
/*
 * "aarch64 maskisr ['on'|'off']": set or query whether interrupts are
 * masked while single-stepping. With no argument, prints the current mode.
 */
COMMAND_HANDLER(aarch64_mask_interrupts_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	/* legal argument strings and their isrmasking_mode values */
	static const Jim_Nvp nvp_maskisr_modes[] = {
		{ .name = "off", .value = AARCH64_ISRMASK_OFF },
		{ .name = "on", .value = AARCH64_ISRMASK_ON },
		{ .name = NULL, .value = -1 },
	};
	const Jim_Nvp *n;

	if (CMD_ARGC > 0) {
		n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
		if (n->name == NULL) {
			/* lookup failed: argument was neither "off" nor "on" */
			LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}

		aarch64->isrmasking_mode = n->value;
	}

	/* always report the (possibly just updated) current mode */
	n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
	command_print(CMD_CTX, "aarch64 interrupt mask %s", n->name);

	return ERROR_OK;
}
/* Subcommands of the "aarch64" command group.
 * NOTE(review): some .name/.usage fields were lost in extraction and are
 * reconstructed here — confirm against upstream OpenOCD aarch64.c. */
static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{	.name = "smp_off",
		.handler = aarch64_handle_smp_off_command,
		.mode = COMMAND_EXEC,
		.help = "Stop smp handling",
		.usage = "",
	},
	{
		.name = "smp_on",
		.handler = aarch64_handle_smp_on_command,
		.mode = COMMAND_EXEC,
		.help = "Restart smp handling",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = aarch64_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask aarch64 interrupts during single-step",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration: chains the shared armv8 commands plus
 * the "aarch64" group defined above.
 * NOTE(review): the group's .name/.usage fields were lost in extraction
 * and are reconstructed — confirm against upstream OpenOCD aarch64.c. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = armv8_command_handlers,
	},
	{
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "Aarch64 command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/*
 * Target driver vtable for ARMv8-A AArch64 cores: binds the generic
 * target framework's operations to the aarch64_*/armv8_* implementations
 * in this file. Watchpoint support is not implemented (NULL hooks).
 * NOTE(review): .name and .mmu entries were lost in extraction and are
 * reconstructed here — confirm against upstream OpenOCD aarch64.c.
 */
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.target_jim_configure = aarch64_jim_configure,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,

	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};
Linking to an existing account
If you already have an account and want to add another login method,
you MUST first sign in with your existing account and
then change the URL to
https://review.openocd.org/login/?link
to return to this page; this time it will link the new login method. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)