1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
26 #include "a64_disassembler.h"
28 #include "target_request.h"
29 #include "target_type.h"
30 #include "armv8_opcodes.h"
31 #include "armv8_cache.h"
32 #include "arm_semihosting.h"
33 #include "jtag/interface.h"
35 #include <helper/time_support.h>
47 struct aarch64_private_config
{
48 struct adiv5_private_config adiv5_config
;
52 static int aarch64_poll(struct target
*target
);
53 static int aarch64_debug_entry(struct target
*target
);
54 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
55 static int aarch64_set_breakpoint(struct target
*target
,
56 struct breakpoint
*breakpoint
, uint8_t matchmode
);
57 static int aarch64_set_context_breakpoint(struct target
*target
,
58 struct breakpoint
*breakpoint
, uint8_t matchmode
);
59 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
60 struct breakpoint
*breakpoint
);
61 static int aarch64_unset_breakpoint(struct target
*target
,
62 struct breakpoint
*breakpoint
);
63 static int aarch64_mmu(struct target
*target
, int *enabled
);
64 static int aarch64_virt2phys(struct target
*target
,
65 target_addr_t virt
, target_addr_t
*phys
);
66 static int aarch64_read_cpu_memory(struct target
*target
,
67 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
69 static int aarch64_restore_system_control_reg(struct target
*target
)
71 enum arm_mode target_mode
= ARM_MODE_ANY
;
72 int retval
= ERROR_OK
;
75 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
76 struct armv8_common
*armv8
= target_to_armv8(target
);
78 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
79 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
80 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
82 switch (armv8
->arm
.core_mode
) {
84 target_mode
= ARMV8_64_EL1H
;
88 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
92 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
96 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
105 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
109 LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
110 armv8_mode_name(armv8
->arm
.core_mode
), armv8
->arm
.core_mode
);
114 if (target_mode
!= ARM_MODE_ANY
)
115 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
117 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
, aarch64
->system_control_reg
);
118 if (retval
!= ERROR_OK
)
121 if (target_mode
!= ARM_MODE_ANY
)
122 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
128 /* modify system_control_reg in order to enable or disable mmu for :
129 * - virt2phys address conversion
130 * - read or write memory in phys or virt address */
131 static int aarch64_mmu_modify(struct target
*target
, int enable
)
133 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
134 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
135 int retval
= ERROR_OK
;
139 /* if mmu enabled at target stop and mmu not enable */
140 if (!(aarch64
->system_control_reg
& 0x1U
)) {
141 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
144 if (!(aarch64
->system_control_reg_curr
& 0x1U
))
145 aarch64
->system_control_reg_curr
|= 0x1U
;
147 if (aarch64
->system_control_reg_curr
& 0x4U
) {
148 /* data cache is active */
149 aarch64
->system_control_reg_curr
&= ~0x4U
;
150 /* flush data cache armv8 function to be called */
151 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
152 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
154 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
155 aarch64
->system_control_reg_curr
&= ~0x1U
;
159 switch (armv8
->arm
.core_mode
) {
163 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
167 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
171 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
180 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
184 LOG_DEBUG("unknown cpu state 0x%x", armv8
->arm
.core_mode
);
188 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
,
189 aarch64
->system_control_reg_curr
);
194 * Basic debug access, very low level assumes state is saved
196 static int aarch64_init_debug_access(struct target
*target
)
198 struct armv8_common
*armv8
= target_to_armv8(target
);
202 LOG_DEBUG("%s", target_name(target
));
204 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
205 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
206 if (retval
!= ERROR_OK
) {
207 LOG_DEBUG("Examine %s failed", "oslock");
211 /* Clear Sticky Power Down status Bit in PRSR to enable access to
212 the registers in the Core Power Domain */
213 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
214 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
215 if (retval
!= ERROR_OK
)
219 * Static CTI configuration:
220 * Channel 0 -> trigger outputs HALT request to PE
221 * Channel 1 -> trigger outputs Resume request to PE
222 * Gate all channel trigger events from entering the CTM
226 retval
= arm_cti_enable(armv8
->cti
, true);
227 /* By default, gate all channel events to and from the CTM */
228 if (retval
== ERROR_OK
)
229 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
230 /* output halt requests to PE on channel 0 event */
231 if (retval
== ERROR_OK
)
232 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN0
, CTI_CHNL(0));
233 /* output restart requests to PE on channel 1 event */
234 if (retval
== ERROR_OK
)
235 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN1
, CTI_CHNL(1));
236 if (retval
!= ERROR_OK
)
239 /* Resync breakpoint registers */
244 /* Write to memory mapped registers directly with no cache or mmu handling */
245 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
250 struct armv8_common
*armv8
= target_to_armv8(target
);
252 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
257 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint64_t debug
)
259 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
262 dpm
->arm
= &a8
->armv8_common
.arm
;
265 retval
= armv8_dpm_setup(dpm
);
266 if (retval
== ERROR_OK
)
267 retval
= armv8_dpm_initialize(dpm
);
272 static int aarch64_set_dscr_bits(struct target
*target
, unsigned long bit_mask
, unsigned long value
)
274 struct armv8_common
*armv8
= target_to_armv8(target
);
275 return armv8_set_dbgreg_bits(armv8
, CPUV8_DBG_DSCR
, bit_mask
, value
);
278 static int aarch64_check_state_one(struct target
*target
,
279 uint32_t mask
, uint32_t val
, int *p_result
, uint32_t *p_prsr
)
281 struct armv8_common
*armv8
= target_to_armv8(target
);
285 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
286 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &prsr
);
287 if (retval
!= ERROR_OK
)
294 *p_result
= (prsr
& mask
) == (val
& mask
);
299 static int aarch64_wait_halt_one(struct target
*target
)
301 int retval
= ERROR_OK
;
304 int64_t then
= timeval_ms();
308 retval
= aarch64_check_state_one(target
, PRSR_HALT
, PRSR_HALT
, &halted
, &prsr
);
309 if (retval
!= ERROR_OK
|| halted
)
312 if (timeval_ms() > then
+ 1000) {
313 retval
= ERROR_TARGET_TIMEOUT
;
314 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32
, target_name(target
), prsr
);
321 static int aarch64_prepare_halt_smp(struct target
*target
, bool exc_target
, struct target
**p_first
)
323 int retval
= ERROR_OK
;
324 struct target_list
*head
= target
->head
;
325 struct target
*first
= NULL
;
327 LOG_DEBUG("target %s exc %i", target_name(target
), exc_target
);
329 while (head
!= NULL
) {
330 struct target
*curr
= head
->target
;
331 struct armv8_common
*armv8
= target_to_armv8(curr
);
334 if (exc_target
&& curr
== target
)
336 if (!target_was_examined(curr
))
338 if (curr
->state
!= TARGET_RUNNING
)
341 /* HACK: mark this target as prepared for halting */
342 curr
->debug_reason
= DBG_REASON_DBGRQ
;
344 /* open the gate for channel 0 to let HALT requests pass to the CTM */
345 retval
= arm_cti_ungate_channel(armv8
->cti
, 0);
346 if (retval
== ERROR_OK
)
347 retval
= aarch64_set_dscr_bits(curr
, DSCR_HDE
, DSCR_HDE
);
348 if (retval
!= ERROR_OK
)
351 LOG_DEBUG("target %s prepared", target_name(curr
));
358 if (exc_target
&& first
)
367 static int aarch64_halt_one(struct target
*target
, enum halt_mode mode
)
369 int retval
= ERROR_OK
;
370 struct armv8_common
*armv8
= target_to_armv8(target
);
372 LOG_DEBUG("%s", target_name(target
));
374 /* allow Halting Debug Mode */
375 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
376 if (retval
!= ERROR_OK
)
379 /* trigger an event on channel 0, this outputs a halt request to the PE */
380 retval
= arm_cti_pulse_channel(armv8
->cti
, 0);
381 if (retval
!= ERROR_OK
)
384 if (mode
== HALT_SYNC
) {
385 retval
= aarch64_wait_halt_one(target
);
386 if (retval
!= ERROR_OK
) {
387 if (retval
== ERROR_TARGET_TIMEOUT
)
388 LOG_ERROR("Timeout waiting for target %s halt", target_name(target
));
396 static int aarch64_halt_smp(struct target
*target
, bool exc_target
)
398 struct target
*next
= target
;
401 /* prepare halt on all PEs of the group */
402 retval
= aarch64_prepare_halt_smp(target
, exc_target
, &next
);
404 if (exc_target
&& next
== target
)
407 /* halt the target PE */
408 if (retval
== ERROR_OK
)
409 retval
= aarch64_halt_one(next
, HALT_LAZY
);
411 if (retval
!= ERROR_OK
)
414 /* wait for all PEs to halt */
415 int64_t then
= timeval_ms();
417 bool all_halted
= true;
418 struct target_list
*head
;
421 foreach_smp_target(head
, target
->head
) {
426 if (!target_was_examined(curr
))
429 retval
= aarch64_check_state_one(curr
, PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
430 if (retval
!= ERROR_OK
|| !halted
) {
439 if (timeval_ms() > then
+ 1000) {
440 retval
= ERROR_TARGET_TIMEOUT
;
445 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
446 * and it looks like the CTI's are not connected by a common
447 * trigger matrix. It seems that we need to halt one core in each
448 * cluster explicitly. So if we find that a core has not halted
449 * yet, we trigger an explicit halt for the second cluster.
451 retval
= aarch64_halt_one(curr
, HALT_LAZY
);
452 if (retval
!= ERROR_OK
)
459 static int update_halt_gdb(struct target
*target
, enum target_debug_reason debug_reason
)
461 struct target
*gdb_target
= NULL
;
462 struct target_list
*head
;
465 if (debug_reason
== DBG_REASON_NOTHALTED
) {
466 LOG_DEBUG("Halting remaining targets in SMP group");
467 aarch64_halt_smp(target
, true);
470 /* poll all targets in the group, but skip the target that serves GDB */
471 foreach_smp_target(head
, target
->head
) {
473 /* skip calling context */
476 if (!target_was_examined(curr
))
478 /* skip targets that were already halted */
479 if (curr
->state
== TARGET_HALTED
)
481 /* remember the gdb_service->target */
482 if (curr
->gdb_service
!= NULL
)
483 gdb_target
= curr
->gdb_service
->target
;
485 if (curr
== gdb_target
)
488 /* avoid recursion in aarch64_poll() */
494 /* after all targets were updated, poll the gdb serving target */
495 if (gdb_target
!= NULL
&& gdb_target
!= target
)
496 aarch64_poll(gdb_target
);
502 * Aarch64 Run control
505 static int aarch64_poll(struct target
*target
)
507 enum target_state prev_target_state
;
508 int retval
= ERROR_OK
;
511 retval
= aarch64_check_state_one(target
,
512 PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
513 if (retval
!= ERROR_OK
)
517 prev_target_state
= target
->state
;
518 if (prev_target_state
!= TARGET_HALTED
) {
519 enum target_debug_reason debug_reason
= target
->debug_reason
;
521 /* We have a halting debug event */
522 target
->state
= TARGET_HALTED
;
523 LOG_DEBUG("Target %s halted", target_name(target
));
524 retval
= aarch64_debug_entry(target
);
525 if (retval
!= ERROR_OK
)
529 update_halt_gdb(target
, debug_reason
);
531 if (arm_semihosting(target
, &retval
) != 0)
534 switch (prev_target_state
) {
538 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
540 case TARGET_DEBUG_RUNNING
:
541 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_HALTED
);
548 target
->state
= TARGET_RUNNING
;
553 static int aarch64_halt(struct target
*target
)
555 struct armv8_common
*armv8
= target_to_armv8(target
);
556 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_HALT
;
559 return aarch64_halt_smp(target
, false);
561 return aarch64_halt_one(target
, HALT_SYNC
);
564 static int aarch64_restore_one(struct target
*target
, int current
,
565 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
567 struct armv8_common
*armv8
= target_to_armv8(target
);
568 struct arm
*arm
= &armv8
->arm
;
572 LOG_DEBUG("%s", target_name(target
));
574 if (!debug_execution
)
575 target_free_all_working_areas(target
);
577 /* current = 1: continue on current pc, otherwise continue at <address> */
578 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
580 resume_pc
= *address
;
582 *address
= resume_pc
;
584 /* Make sure that the Armv7 gdb thumb fixups does not
585 * kill the return address
587 switch (arm
->core_state
) {
589 resume_pc
&= 0xFFFFFFFC;
591 case ARM_STATE_AARCH64
:
592 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
594 case ARM_STATE_THUMB
:
595 case ARM_STATE_THUMB_EE
:
596 /* When the return address is loaded into PC
597 * bit 0 must be 1 to stay in Thumb state
601 case ARM_STATE_JAZELLE
:
602 LOG_ERROR("How do I resume into Jazelle state??");
605 LOG_DEBUG("resume pc = 0x%016" PRIx64
, resume_pc
);
606 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
607 arm
->pc
->dirty
= true;
608 arm
->pc
->valid
= true;
610 /* called it now before restoring context because it uses cpu
611 * register r0 for restoring system control register */
612 retval
= aarch64_restore_system_control_reg(target
);
613 if (retval
== ERROR_OK
)
614 retval
= aarch64_restore_context(target
, handle_breakpoints
);
620 * prepare single target for restart
624 static int aarch64_prepare_restart_one(struct target
*target
)
626 struct armv8_common
*armv8
= target_to_armv8(target
);
631 LOG_DEBUG("%s", target_name(target
));
633 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
634 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
635 if (retval
!= ERROR_OK
)
638 if ((dscr
& DSCR_ITE
) == 0)
639 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
640 if ((dscr
& DSCR_ERR
) != 0)
641 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
643 /* acknowledge a pending CTI halt event */
644 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
646 * open the CTI gate for channel 1 so that the restart events
647 * get passed along to all PEs. Also close gate for channel 0
648 * to isolate the PE from halt events.
650 if (retval
== ERROR_OK
)
651 retval
= arm_cti_ungate_channel(armv8
->cti
, 1);
652 if (retval
== ERROR_OK
)
653 retval
= arm_cti_gate_channel(armv8
->cti
, 0);
655 /* make sure that DSCR.HDE is set */
656 if (retval
== ERROR_OK
) {
658 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
659 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
662 if (retval
== ERROR_OK
) {
663 /* clear sticky bits in PRSR, SDR is now 0 */
664 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
665 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &tmp
);
671 static int aarch64_do_restart_one(struct target
*target
, enum restart_mode mode
)
673 struct armv8_common
*armv8
= target_to_armv8(target
);
676 LOG_DEBUG("%s", target_name(target
));
678 /* trigger an event on channel 1, generates a restart request to the PE */
679 retval
= arm_cti_pulse_channel(armv8
->cti
, 1);
680 if (retval
!= ERROR_OK
)
683 if (mode
== RESTART_SYNC
) {
684 int64_t then
= timeval_ms();
688 * if PRSR.SDR is set now, the target did restart, even
689 * if it's now already halted again (e.g. due to breakpoint)
691 retval
= aarch64_check_state_one(target
,
692 PRSR_SDR
, PRSR_SDR
, &resumed
, NULL
);
693 if (retval
!= ERROR_OK
|| resumed
)
696 if (timeval_ms() > then
+ 1000) {
697 LOG_ERROR("%s: Timeout waiting for resume"PRIx32
, target_name(target
));
698 retval
= ERROR_TARGET_TIMEOUT
;
704 if (retval
!= ERROR_OK
)
707 target
->debug_reason
= DBG_REASON_NOTHALTED
;
708 target
->state
= TARGET_RUNNING
;
713 static int aarch64_restart_one(struct target
*target
, enum restart_mode mode
)
717 LOG_DEBUG("%s", target_name(target
));
719 retval
= aarch64_prepare_restart_one(target
);
720 if (retval
== ERROR_OK
)
721 retval
= aarch64_do_restart_one(target
, mode
);
727 * prepare all but the current target for restart
729 static int aarch64_prep_restart_smp(struct target
*target
, int handle_breakpoints
, struct target
**p_first
)
731 int retval
= ERROR_OK
;
732 struct target_list
*head
;
733 struct target
*first
= NULL
;
736 foreach_smp_target(head
, target
->head
) {
737 struct target
*curr
= head
->target
;
739 /* skip calling target */
742 if (!target_was_examined(curr
))
744 if (curr
->state
!= TARGET_HALTED
)
747 /* resume at current address, not in step mode */
748 retval
= aarch64_restore_one(curr
, 1, &address
, handle_breakpoints
, 0);
749 if (retval
== ERROR_OK
)
750 retval
= aarch64_prepare_restart_one(curr
);
751 if (retval
!= ERROR_OK
) {
752 LOG_ERROR("failed to restore target %s", target_name(curr
));
755 /* remember the first valid target in the group */
767 static int aarch64_step_restart_smp(struct target
*target
)
769 int retval
= ERROR_OK
;
770 struct target_list
*head
;
771 struct target
*first
= NULL
;
773 LOG_DEBUG("%s", target_name(target
));
775 retval
= aarch64_prep_restart_smp(target
, 0, &first
);
776 if (retval
!= ERROR_OK
)
780 retval
= aarch64_do_restart_one(first
, RESTART_LAZY
);
781 if (retval
!= ERROR_OK
) {
782 LOG_DEBUG("error restarting target %s", target_name(first
));
786 int64_t then
= timeval_ms();
788 struct target
*curr
= target
;
789 bool all_resumed
= true;
791 foreach_smp_target(head
, target
->head
) {
800 if (!target_was_examined(curr
))
803 retval
= aarch64_check_state_one(curr
,
804 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
805 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
810 if (curr
->state
!= TARGET_RUNNING
) {
811 curr
->state
= TARGET_RUNNING
;
812 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
813 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
820 if (timeval_ms() > then
+ 1000) {
821 LOG_ERROR("%s: timeout waiting for target resume", __func__
);
822 retval
= ERROR_TARGET_TIMEOUT
;
826 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
827 * and it looks like the CTI's are not connected by a common
828 * trigger matrix. It seems that we need to halt one core in each
829 * cluster explicitly. So if we find that a core has not halted
830 * yet, we trigger an explicit resume for the second cluster.
832 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
833 if (retval
!= ERROR_OK
)
840 static int aarch64_resume(struct target
*target
, int current
,
841 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
844 uint64_t addr
= address
;
846 struct armv8_common
*armv8
= target_to_armv8(target
);
847 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_RESUME
;
849 if (target
->state
!= TARGET_HALTED
)
850 return ERROR_TARGET_NOT_HALTED
;
853 * If this target is part of a SMP group, prepare the others
854 * targets for resuming. This involves restoring the complete
855 * target register context and setting up CTI gates to accept
856 * resume events from the trigger matrix.
859 retval
= aarch64_prep_restart_smp(target
, handle_breakpoints
, NULL
);
860 if (retval
!= ERROR_OK
)
864 /* all targets prepared, restore and restart the current target */
865 retval
= aarch64_restore_one(target
, current
, &addr
, handle_breakpoints
,
867 if (retval
== ERROR_OK
)
868 retval
= aarch64_restart_one(target
, RESTART_SYNC
);
869 if (retval
!= ERROR_OK
)
873 int64_t then
= timeval_ms();
875 struct target
*curr
= target
;
876 struct target_list
*head
;
877 bool all_resumed
= true;
879 foreach_smp_target(head
, target
->head
) {
886 if (!target_was_examined(curr
))
889 retval
= aarch64_check_state_one(curr
,
890 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
891 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
896 if (curr
->state
!= TARGET_RUNNING
) {
897 curr
->state
= TARGET_RUNNING
;
898 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
899 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
906 if (timeval_ms() > then
+ 1000) {
907 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__
, target_name(curr
));
908 retval
= ERROR_TARGET_TIMEOUT
;
913 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
914 * and it looks like the CTI's are not connected by a common
915 * trigger matrix. It seems that we need to halt one core in each
916 * cluster explicitly. So if we find that a core has not halted
917 * yet, we trigger an explicit resume for the second cluster.
919 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
920 if (retval
!= ERROR_OK
)
925 if (retval
!= ERROR_OK
)
928 target
->debug_reason
= DBG_REASON_NOTHALTED
;
930 if (!debug_execution
) {
931 target
->state
= TARGET_RUNNING
;
932 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
933 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
935 target
->state
= TARGET_DEBUG_RUNNING
;
936 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
937 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
943 static int aarch64_debug_entry(struct target
*target
)
945 int retval
= ERROR_OK
;
946 struct armv8_common
*armv8
= target_to_armv8(target
);
947 struct arm_dpm
*dpm
= &armv8
->dpm
;
948 enum arm_state core_state
;
951 /* make sure to clear all sticky errors */
952 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
953 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
954 if (retval
== ERROR_OK
)
955 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
956 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
957 if (retval
== ERROR_OK
)
958 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
960 if (retval
!= ERROR_OK
)
963 LOG_DEBUG("%s dscr = 0x%08" PRIx32
, target_name(target
), dscr
);
966 core_state
= armv8_dpm_get_core_state(dpm
);
967 armv8_select_opcodes(armv8
, core_state
== ARM_STATE_AARCH64
);
968 armv8_select_reg_access(armv8
, core_state
== ARM_STATE_AARCH64
);
970 /* close the CTI gate for all events */
971 if (retval
== ERROR_OK
)
972 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
973 /* discard async exceptions */
974 if (retval
== ERROR_OK
)
975 retval
= dpm
->instr_cpsr_sync(dpm
);
976 if (retval
!= ERROR_OK
)
979 /* Examine debug reason */
980 armv8_dpm_report_dscr(dpm
, dscr
);
982 /* save address of instruction that triggered the watchpoint? */
983 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
987 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
988 armv8
->debug_base
+ CPUV8_DBG_WFAR1
,
990 if (retval
!= ERROR_OK
)
994 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
995 armv8
->debug_base
+ CPUV8_DBG_WFAR0
,
997 if (retval
!= ERROR_OK
)
1000 armv8_dpm_report_wfar(&armv8
->dpm
, wfar
);
1003 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
1005 if (retval
== ERROR_OK
&& armv8
->post_debug_entry
)
1006 retval
= armv8
->post_debug_entry(target
);
1011 static int aarch64_post_debug_entry(struct target
*target
)
1013 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1014 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1016 enum arm_mode target_mode
= ARM_MODE_ANY
;
1019 switch (armv8
->arm
.core_mode
) {
1021 target_mode
= ARMV8_64_EL1H
;
1025 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL1
, 0);
1029 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL2
, 0);
1033 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL3
, 0);
1042 instr
= ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1046 LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
1047 armv8_mode_name(armv8
->arm
.core_mode
), armv8
->arm
.core_mode
);
1051 if (target_mode
!= ARM_MODE_ANY
)
1052 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
1054 retval
= armv8
->dpm
.instr_read_data_r0(&armv8
->dpm
, instr
, &aarch64
->system_control_reg
);
1055 if (retval
!= ERROR_OK
)
1058 if (target_mode
!= ARM_MODE_ANY
)
1059 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
1061 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
1062 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
1064 if (armv8
->armv8_mmu
.armv8_cache
.info
== -1) {
1065 armv8_identify_cache(armv8
);
1066 armv8_read_mpidr(armv8
);
1069 armv8
->armv8_mmu
.mmu_enabled
=
1070 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
1071 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
1072 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
1073 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
1074 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
1079 * single-step a target
1081 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
1082 int handle_breakpoints
)
1084 struct armv8_common
*armv8
= target_to_armv8(target
);
1085 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1086 int saved_retval
= ERROR_OK
;
1090 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_STEP
;
1092 if (target
->state
!= TARGET_HALTED
) {
1093 LOG_WARNING("target not halted");
1094 return ERROR_TARGET_NOT_HALTED
;
1097 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1098 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
1099 /* make sure EDECR.SS is not set when restoring the register */
1101 if (retval
== ERROR_OK
) {
1103 /* set EDECR.SS to enter hardware step mode */
1104 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1105 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (edecr
|0x4));
1107 /* disable interrupts while stepping */
1108 if (retval
== ERROR_OK
&& aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
)
1109 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0x3 << 22);
1110 /* bail out if stepping setup has failed */
1111 if (retval
!= ERROR_OK
)
1114 if (target
->smp
&& (current
== 1)) {
1116 * isolate current target so that it doesn't get resumed
1117 * together with the others
1119 retval
= arm_cti_gate_channel(armv8
->cti
, 1);
1120 /* resume all other targets in the group */
1121 if (retval
== ERROR_OK
)
1122 retval
= aarch64_step_restart_smp(target
);
1123 if (retval
!= ERROR_OK
) {
1124 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1127 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1130 /* all other targets running, restore and restart the current target */
1131 retval
= aarch64_restore_one(target
, current
, &address
, 0, 0);
1132 if (retval
== ERROR_OK
)
1133 retval
= aarch64_restart_one(target
, RESTART_LAZY
);
1135 if (retval
!= ERROR_OK
)
1138 LOG_DEBUG("target step-resumed at 0x%" PRIx64
, address
);
1139 if (!handle_breakpoints
)
1140 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1142 int64_t then
= timeval_ms();
1147 retval
= aarch64_check_state_one(target
,
1148 PRSR_SDR
|PRSR_HALT
, PRSR_SDR
|PRSR_HALT
, &stepped
, &prsr
);
1149 if (retval
!= ERROR_OK
|| stepped
)
1152 if (timeval_ms() > then
+ 100) {
1153 LOG_ERROR("timeout waiting for target %s halt after step",
1154 target_name(target
));
1155 retval
= ERROR_TARGET_TIMEOUT
;
1161 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1162 * causes a timeout. The core takes the step but doesn't complete it and so
1163 * debug state is never entered. However, you can manually halt the core
1164 * as an external debug even is also a WFI wakeup event.
1166 if (retval
== ERROR_TARGET_TIMEOUT
)
1167 saved_retval
= aarch64_halt_one(target
, HALT_SYNC
);
1170 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1171 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
1172 if (retval
!= ERROR_OK
)
1175 /* restore interrupts */
1176 if (aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
) {
1177 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0);
1178 if (retval
!= ERROR_OK
)
1182 if (saved_retval
!= ERROR_OK
)
1183 return saved_retval
;
1188 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
1190 struct armv8_common
*armv8
= target_to_armv8(target
);
1191 struct arm
*arm
= &armv8
->arm
;
1195 LOG_DEBUG("%s", target_name(target
));
1197 if (armv8
->pre_restore_context
)
1198 armv8
->pre_restore_context(target
);
1200 retval
= armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
1201 if (retval
== ERROR_OK
) {
1202 /* registers are now invalid */
1203 register_cache_invalidate(arm
->core_cache
);
1204 register_cache_invalidate(arm
->core_cache
->next
);
1211 * Cortex-A8 Breakpoint and watchpoint functions
1214 /* Setup hardware Breakpoint Register Pair */
1215 static int aarch64_set_breakpoint(struct target
*target
,
1216 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1221 uint8_t byte_addr_select
= 0x0F;
1222 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1223 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1224 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1226 if (breakpoint
->set
) {
1227 LOG_WARNING("breakpoint already set");
1231 if (breakpoint
->type
== BKPT_HARD
) {
1233 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
1235 if (brp_i
>= aarch64
->brp_num
) {
1236 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1237 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1239 breakpoint
->set
= brp_i
+ 1;
1240 if (breakpoint
->length
== 2)
1241 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1242 control
= ((matchmode
& 0x7) << 20)
1244 | (byte_addr_select
<< 5)
1246 brp_list
[brp_i
].used
= 1;
1247 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1248 brp_list
[brp_i
].control
= control
;
1249 bpt_value
= brp_list
[brp_i
].value
;
1251 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1252 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1253 (uint32_t)(bpt_value
& 0xFFFFFFFF));
1254 if (retval
!= ERROR_OK
)
1256 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1257 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1258 (uint32_t)(bpt_value
>> 32));
1259 if (retval
!= ERROR_OK
)
1262 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1263 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1264 brp_list
[brp_i
].control
);
1265 if (retval
!= ERROR_OK
)
1267 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1268 brp_list
[brp_i
].control
,
1269 brp_list
[brp_i
].value
);
1271 } else if (breakpoint
->type
== BKPT_SOFT
) {
1275 if (armv8_dpm_get_core_state(&armv8
->dpm
) == ARM_STATE_AARCH64
) {
1276 opcode
= ARMV8_HLT(11);
1278 if (breakpoint
->length
!= 4)
1279 LOG_ERROR("bug: breakpoint length should be 4 in AArch64 mode");
1282 * core_state is ARM_STATE_ARM
1283 * in that case the opcode depends on breakpoint length:
1284 * - if length == 4 => A32 opcode
1285 * - if length == 2 => T32 opcode
1286 * - if length == 3 => T32 opcode (refer to gdb doc : ARM-Breakpoint-Kinds)
1287 * in that case the length should be changed from 3 to 4 bytes
1289 opcode
= (breakpoint
->length
== 4) ? ARMV8_HLT_A1(11) :
1290 (uint32_t) (ARMV8_HLT_T1(11) | ARMV8_HLT_T1(11) << 16);
1292 if (breakpoint
->length
== 3)
1293 breakpoint
->length
= 4;
1296 buf_set_u32(code
, 0, 32, opcode
);
1298 retval
= target_read_memory(target
,
1299 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1300 breakpoint
->length
, 1,
1301 breakpoint
->orig_instr
);
1302 if (retval
!= ERROR_OK
)
1305 armv8_cache_d_inner_flush_virt(armv8
,
1306 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1307 breakpoint
->length
);
1309 retval
= target_write_memory(target
,
1310 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1311 breakpoint
->length
, 1, code
);
1312 if (retval
!= ERROR_OK
)
1315 armv8_cache_d_inner_flush_virt(armv8
,
1316 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1317 breakpoint
->length
);
1319 armv8_cache_i_inner_inval_virt(armv8
,
1320 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1321 breakpoint
->length
);
1323 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1326 /* Ensure that halting debug mode is enable */
1327 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
1328 if (retval
!= ERROR_OK
) {
1329 LOG_DEBUG("Failed to set DSCR.HDE");
1336 static int aarch64_set_context_breakpoint(struct target
*target
,
1337 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1339 int retval
= ERROR_FAIL
;
1342 uint8_t byte_addr_select
= 0x0F;
1343 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1344 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1345 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1347 if (breakpoint
->set
) {
1348 LOG_WARNING("breakpoint already set");
1351 /*check available context BRPs*/
1352 while ((brp_list
[brp_i
].used
||
1353 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
1356 if (brp_i
>= aarch64
->brp_num
) {
1357 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1361 breakpoint
->set
= brp_i
+ 1;
1362 control
= ((matchmode
& 0x7) << 20)
1364 | (byte_addr_select
<< 5)
1366 brp_list
[brp_i
].used
= 1;
1367 brp_list
[brp_i
].value
= (breakpoint
->asid
);
1368 brp_list
[brp_i
].control
= control
;
1369 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1370 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1371 brp_list
[brp_i
].value
);
1372 if (retval
!= ERROR_OK
)
1374 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1375 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1376 brp_list
[brp_i
].control
);
1377 if (retval
!= ERROR_OK
)
1379 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1380 brp_list
[brp_i
].control
,
1381 brp_list
[brp_i
].value
);
1386 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1388 int retval
= ERROR_FAIL
;
1389 int brp_1
= 0; /* holds the contextID pair */
1390 int brp_2
= 0; /* holds the IVA pair */
1391 uint32_t control_CTX
, control_IVA
;
1392 uint8_t CTX_byte_addr_select
= 0x0F;
1393 uint8_t IVA_byte_addr_select
= 0x0F;
1394 uint8_t CTX_machmode
= 0x03;
1395 uint8_t IVA_machmode
= 0x01;
1396 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1397 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1398 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1400 if (breakpoint
->set
) {
1401 LOG_WARNING("breakpoint already set");
1404 /*check available context BRPs*/
1405 while ((brp_list
[brp_1
].used
||
1406 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
1409 printf("brp(CTX) found num: %d\n", brp_1
);
1410 if (brp_1
>= aarch64
->brp_num
) {
1411 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1415 while ((brp_list
[brp_2
].used
||
1416 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
1419 printf("brp(IVA) found num: %d\n", brp_2
);
1420 if (brp_2
>= aarch64
->brp_num
) {
1421 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1425 breakpoint
->set
= brp_1
+ 1;
1426 breakpoint
->linked_BRP
= brp_2
;
1427 control_CTX
= ((CTX_machmode
& 0x7) << 20)
1430 | (CTX_byte_addr_select
<< 5)
1432 brp_list
[brp_1
].used
= 1;
1433 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1434 brp_list
[brp_1
].control
= control_CTX
;
1435 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1436 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1437 brp_list
[brp_1
].value
);
1438 if (retval
!= ERROR_OK
)
1440 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1441 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1442 brp_list
[brp_1
].control
);
1443 if (retval
!= ERROR_OK
)
1446 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1449 | (IVA_byte_addr_select
<< 5)
1451 brp_list
[brp_2
].used
= 1;
1452 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1453 brp_list
[brp_2
].control
= control_IVA
;
1454 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1455 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1456 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1457 if (retval
!= ERROR_OK
)
1459 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1460 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].BRPn
,
1461 brp_list
[brp_2
].value
>> 32);
1462 if (retval
!= ERROR_OK
)
1464 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1465 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1466 brp_list
[brp_2
].control
);
1467 if (retval
!= ERROR_OK
)
1473 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1476 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1477 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1478 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1480 if (!breakpoint
->set
) {
1481 LOG_WARNING("breakpoint not set");
1485 if (breakpoint
->type
== BKPT_HARD
) {
1486 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1487 int brp_i
= breakpoint
->set
- 1;
1488 int brp_j
= breakpoint
->linked_BRP
;
1489 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1490 LOG_DEBUG("Invalid BRP number in breakpoint");
1493 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1494 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1495 brp_list
[brp_i
].used
= 0;
1496 brp_list
[brp_i
].value
= 0;
1497 brp_list
[brp_i
].control
= 0;
1498 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1499 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1500 brp_list
[brp_i
].control
);
1501 if (retval
!= ERROR_OK
)
1503 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1504 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1505 (uint32_t)brp_list
[brp_i
].value
);
1506 if (retval
!= ERROR_OK
)
1508 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1509 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1510 (uint32_t)brp_list
[brp_i
].value
);
1511 if (retval
!= ERROR_OK
)
1513 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1514 LOG_DEBUG("Invalid BRP number in breakpoint");
1517 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1518 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1519 brp_list
[brp_j
].used
= 0;
1520 brp_list
[brp_j
].value
= 0;
1521 brp_list
[brp_j
].control
= 0;
1522 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1523 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1524 brp_list
[brp_j
].control
);
1525 if (retval
!= ERROR_OK
)
1527 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1528 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1529 (uint32_t)brp_list
[brp_j
].value
);
1530 if (retval
!= ERROR_OK
)
1532 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1533 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_j
].BRPn
,
1534 (uint32_t)brp_list
[brp_j
].value
);
1535 if (retval
!= ERROR_OK
)
1538 breakpoint
->linked_BRP
= 0;
1539 breakpoint
->set
= 0;
1543 int brp_i
= breakpoint
->set
- 1;
1544 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1545 LOG_DEBUG("Invalid BRP number in breakpoint");
1548 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1549 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1550 brp_list
[brp_i
].used
= 0;
1551 brp_list
[brp_i
].value
= 0;
1552 brp_list
[brp_i
].control
= 0;
1553 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1554 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1555 brp_list
[brp_i
].control
);
1556 if (retval
!= ERROR_OK
)
1558 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1559 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1560 brp_list
[brp_i
].value
);
1561 if (retval
!= ERROR_OK
)
1564 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1565 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1566 (uint32_t)brp_list
[brp_i
].value
);
1567 if (retval
!= ERROR_OK
)
1569 breakpoint
->set
= 0;
1573 /* restore original instruction (kept in target endianness) */
1575 armv8_cache_d_inner_flush_virt(armv8
,
1576 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1577 breakpoint
->length
);
1579 if (breakpoint
->length
== 4) {
1580 retval
= target_write_memory(target
,
1581 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1582 4, 1, breakpoint
->orig_instr
);
1583 if (retval
!= ERROR_OK
)
1586 retval
= target_write_memory(target
,
1587 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1588 2, 1, breakpoint
->orig_instr
);
1589 if (retval
!= ERROR_OK
)
1593 armv8_cache_d_inner_flush_virt(armv8
,
1594 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1595 breakpoint
->length
);
1597 armv8_cache_i_inner_inval_virt(armv8
,
1598 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1599 breakpoint
->length
);
1601 breakpoint
->set
= 0;
1606 static int aarch64_add_breakpoint(struct target
*target
,
1607 struct breakpoint
*breakpoint
)
1609 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1611 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1612 LOG_INFO("no hardware breakpoint available");
1613 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1616 if (breakpoint
->type
== BKPT_HARD
)
1617 aarch64
->brp_num_available
--;
1619 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1622 static int aarch64_add_context_breakpoint(struct target
*target
,
1623 struct breakpoint
*breakpoint
)
1625 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1627 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1628 LOG_INFO("no hardware breakpoint available");
1629 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1632 if (breakpoint
->type
== BKPT_HARD
)
1633 aarch64
->brp_num_available
--;
1635 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1638 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1639 struct breakpoint
*breakpoint
)
1641 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1643 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1644 LOG_INFO("no hardware breakpoint available");
1645 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1648 if (breakpoint
->type
== BKPT_HARD
)
1649 aarch64
->brp_num_available
--;
1651 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1655 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1657 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1660 /* It is perfectly possible to remove breakpoints while the target is running */
1661 if (target
->state
!= TARGET_HALTED
) {
1662 LOG_WARNING("target not halted");
1663 return ERROR_TARGET_NOT_HALTED
;
1667 if (breakpoint
->set
) {
1668 aarch64_unset_breakpoint(target
, breakpoint
);
1669 if (breakpoint
->type
== BKPT_HARD
)
1670 aarch64
->brp_num_available
++;
1677 * Cortex-A8 Reset functions
1680 static int aarch64_enable_reset_catch(struct target
*target
, bool enable
)
1682 struct armv8_common
*armv8
= target_to_armv8(target
);
1686 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1687 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
1688 LOG_DEBUG("EDECR = 0x%08" PRIx32
", enable=%d", edecr
, enable
);
1689 if (retval
!= ERROR_OK
)
1697 return mem_ap_write_atomic_u32(armv8
->debug_ap
,
1698 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
1701 static int aarch64_clear_reset_catch(struct target
*target
)
1703 struct armv8_common
*armv8
= target_to_armv8(target
);
1708 /* check if Reset Catch debug event triggered as expected */
1709 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1710 armv8
->debug_base
+ CPUV8_DBG_EDESR
, &edesr
);
1711 if (retval
!= ERROR_OK
)
1714 was_triggered
= !!(edesr
& ESR_RC
);
1715 LOG_DEBUG("Reset Catch debug event %s",
1716 was_triggered
? "triggered" : "NOT triggered!");
1718 if (was_triggered
) {
1719 /* clear pending Reset Catch debug event */
1721 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1722 armv8
->debug_base
+ CPUV8_DBG_EDESR
, edesr
);
1723 if (retval
!= ERROR_OK
)
1730 static int aarch64_assert_reset(struct target
*target
)
1732 struct armv8_common
*armv8
= target_to_armv8(target
);
1733 enum reset_types reset_config
= jtag_get_reset_config();
1738 /* Issue some kind of warm reset. */
1739 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1740 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1741 else if (reset_config
& RESET_HAS_SRST
) {
1742 bool srst_asserted
= false;
1744 if (target
->reset_halt
) {
1745 if (target_was_examined(target
)) {
1747 if (reset_config
& RESET_SRST_NO_GATING
) {
1749 * SRST needs to be asserted *before* Reset Catch
1750 * debug event can be set up.
1752 adapter_assert_reset();
1753 srst_asserted
= true;
1755 /* make sure to clear all sticky errors */
1756 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1757 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
1760 /* set up Reset Catch debug event to halt the CPU after reset */
1761 retval
= aarch64_enable_reset_catch(target
, true);
1762 if (retval
!= ERROR_OK
)
1763 LOG_WARNING("%s: Error enabling Reset Catch debug event; the CPU will not halt immediately after reset!",
1764 target_name(target
));
1766 LOG_WARNING("%s: Target not examined, will not halt immediately after reset!",
1767 target_name(target
));
1771 /* REVISIT handle "pulls" cases, if there's
1772 * hardware that needs them to work.
1775 adapter_assert_reset();
1777 LOG_ERROR("%s: how to reset?", target_name(target
));
1781 /* registers are now invalid */
1782 if (target_was_examined(target
)) {
1783 register_cache_invalidate(armv8
->arm
.core_cache
);
1784 register_cache_invalidate(armv8
->arm
.core_cache
->next
);
1787 target
->state
= TARGET_RESET
;
1792 static int aarch64_deassert_reset(struct target
*target
)
1798 /* be certain SRST is off */
1799 adapter_deassert_reset();
1801 if (!target_was_examined(target
))
1804 retval
= aarch64_init_debug_access(target
);
1805 if (retval
!= ERROR_OK
)
1808 retval
= aarch64_poll(target
);
1809 if (retval
!= ERROR_OK
)
1812 if (target
->reset_halt
) {
1813 /* clear pending Reset Catch debug event */
1814 retval
= aarch64_clear_reset_catch(target
);
1815 if (retval
!= ERROR_OK
)
1816 LOG_WARNING("%s: Clearing Reset Catch debug event failed",
1817 target_name(target
));
1819 /* disable Reset Catch debug event */
1820 retval
= aarch64_enable_reset_catch(target
, false);
1821 if (retval
!= ERROR_OK
)
1822 LOG_WARNING("%s: Disabling Reset Catch debug event failed",
1823 target_name(target
));
1825 if (target
->state
!= TARGET_HALTED
) {
1826 LOG_WARNING("%s: ran after reset and before halt ...",
1827 target_name(target
));
1828 retval
= target_halt(target
);
1829 if (retval
!= ERROR_OK
)
1837 static int aarch64_write_cpu_memory_slow(struct target
*target
,
1838 uint32_t size
, uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
1840 struct armv8_common
*armv8
= target_to_armv8(target
);
1841 struct arm_dpm
*dpm
= &armv8
->dpm
;
1842 struct arm
*arm
= &armv8
->arm
;
1845 armv8_reg_current(arm
, 1)->dirty
= true;
1847 /* change DCC to normal mode if necessary */
1848 if (*dscr
& DSCR_MA
) {
1850 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1851 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1852 if (retval
!= ERROR_OK
)
1857 uint32_t data
, opcode
;
1859 /* write the data to store into DTRRX */
1863 data
= target_buffer_get_u16(target
, buffer
);
1865 data
= target_buffer_get_u32(target
, buffer
);
1866 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1867 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
1868 if (retval
!= ERROR_OK
)
1871 if (arm
->core_state
== ARM_STATE_AARCH64
)
1872 retval
= dpm
->instr_execute(dpm
, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0
, 1));
1874 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
1875 if (retval
!= ERROR_OK
)
1879 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRB_IP
);
1881 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRH_IP
);
1883 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRW_IP
);
1884 retval
= dpm
->instr_execute(dpm
, opcode
);
1885 if (retval
!= ERROR_OK
)
1896 static int aarch64_write_cpu_memory_fast(struct target
*target
,
1897 uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
1899 struct armv8_common
*armv8
= target_to_armv8(target
);
1900 struct arm
*arm
= &armv8
->arm
;
1903 armv8_reg_current(arm
, 1)->dirty
= true;
1905 /* Step 1.d - Change DCC to memory mode */
1907 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1908 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1909 if (retval
!= ERROR_OK
)
1913 /* Step 2.a - Do the write */
1914 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
1915 buffer
, 4, count
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
1916 if (retval
!= ERROR_OK
)
1919 /* Step 3.a - Switch DTR mode back to Normal mode */
1921 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1922 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1923 if (retval
!= ERROR_OK
)
1929 static int aarch64_write_cpu_memory(struct target
*target
,
1930 uint64_t address
, uint32_t size
,
1931 uint32_t count
, const uint8_t *buffer
)
1933 /* write memory through APB-AP */
1934 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1935 struct armv8_common
*armv8
= target_to_armv8(target
);
1936 struct arm_dpm
*dpm
= &armv8
->dpm
;
1937 struct arm
*arm
= &armv8
->arm
;
1940 if (target
->state
!= TARGET_HALTED
) {
1941 LOG_WARNING("target not halted");
1942 return ERROR_TARGET_NOT_HALTED
;
1945 /* Mark register X0 as dirty, as it will be used
1946 * for transferring the data.
1947 * It will be restored automatically when exiting
1950 armv8_reg_current(arm
, 0)->dirty
= true;
1952 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1955 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1956 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1957 if (retval
!= ERROR_OK
)
1960 /* Set Normal access mode */
1961 dscr
= (dscr
& ~DSCR_MA
);
1962 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1963 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1964 if (retval
!= ERROR_OK
)
1967 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1968 /* Write X0 with value 'address' using write procedure */
1969 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1970 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1971 retval
= dpm
->instr_write_data_dcc_64(dpm
,
1972 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
1974 /* Write R0 with value 'address' using write procedure */
1975 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1976 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1977 retval
= dpm
->instr_write_data_dcc(dpm
,
1978 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
1981 if (retval
!= ERROR_OK
)
1984 if (size
== 4 && (address
% 4) == 0)
1985 retval
= aarch64_write_cpu_memory_fast(target
, count
, buffer
, &dscr
);
1987 retval
= aarch64_write_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
1989 if (retval
!= ERROR_OK
) {
1990 /* Unset DTR mode */
1991 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1992 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1994 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1995 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1998 /* Check for sticky abort flags in the DSCR */
1999 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2000 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2001 if (retval
!= ERROR_OK
)
2005 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
2006 /* Abort occurred - clear it and exit */
2007 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
2008 armv8_dpm_handle_exception(dpm
, true);
2016 static int aarch64_read_cpu_memory_slow(struct target
*target
,
2017 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
2019 struct armv8_common
*armv8
= target_to_armv8(target
);
2020 struct arm_dpm
*dpm
= &armv8
->dpm
;
2021 struct arm
*arm
= &armv8
->arm
;
2024 armv8_reg_current(arm
, 1)->dirty
= true;
2026 /* change DCC to normal mode (if necessary) */
2027 if (*dscr
& DSCR_MA
) {
2029 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2030 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2031 if (retval
!= ERROR_OK
)
2036 uint32_t opcode
, data
;
2039 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRB_IP
);
2041 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRH_IP
);
2043 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRW_IP
);
2044 retval
= dpm
->instr_execute(dpm
, opcode
);
2045 if (retval
!= ERROR_OK
)
2048 if (arm
->core_state
== ARM_STATE_AARCH64
)
2049 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0
, 1));
2051 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
2052 if (retval
!= ERROR_OK
)
2055 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2056 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &data
);
2057 if (retval
!= ERROR_OK
)
2061 *buffer
= (uint8_t)data
;
2063 target_buffer_set_u16(target
, buffer
, (uint16_t)data
);
2065 target_buffer_set_u32(target
, buffer
, data
);
2075 static int aarch64_read_cpu_memory_fast(struct target
*target
,
2076 uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
2078 struct armv8_common
*armv8
= target_to_armv8(target
);
2079 struct arm_dpm
*dpm
= &armv8
->dpm
;
2080 struct arm
*arm
= &armv8
->arm
;
2084 /* Mark X1 as dirty */
2085 armv8_reg_current(arm
, 1)->dirty
= true;
2087 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2088 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2089 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0));
2091 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2092 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
2095 if (retval
!= ERROR_OK
)
2098 /* Step 1.e - Change DCC to memory mode */
2100 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2101 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2102 if (retval
!= ERROR_OK
)
2105 /* Step 1.f - read DBGDTRTX and discard the value */
2106 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2107 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2108 if (retval
!= ERROR_OK
)
2112 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
2113 * Abort flags are sticky, so can be read at end of transactions
2115 * This data is read in aligned to 32 bit boundary.
2119 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
2120 * increments X0 by 4. */
2121 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, buffer
, 4, count
,
2122 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
2123 if (retval
!= ERROR_OK
)
2127 /* Step 3.a - set DTR access mode back to Normal mode */
2129 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2130 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2131 if (retval
!= ERROR_OK
)
2134 /* Step 3.b - read DBGDTRTX for the final value */
2135 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2136 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2137 if (retval
!= ERROR_OK
)
2140 target_buffer_set_u32(target
, buffer
+ count
* 4, value
);
2144 static int aarch64_read_cpu_memory(struct target
*target
,
2145 target_addr_t address
, uint32_t size
,
2146 uint32_t count
, uint8_t *buffer
)
2148 /* read memory through APB-AP */
2149 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2150 struct armv8_common
*armv8
= target_to_armv8(target
);
2151 struct arm_dpm
*dpm
= &armv8
->dpm
;
2152 struct arm
*arm
= &armv8
->arm
;
2155 LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64
" size %" PRIu32
" count %" PRIu32
,
2156 address
, size
, count
);
2158 if (target
->state
!= TARGET_HALTED
) {
2159 LOG_WARNING("target not halted");
2160 return ERROR_TARGET_NOT_HALTED
;
2163 /* Mark register X0 as dirty, as it will be used
2164 * for transferring the data.
2165 * It will be restored automatically when exiting
2168 armv8_reg_current(arm
, 0)->dirty
= true;
2171 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2172 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2173 if (retval
!= ERROR_OK
)
2176 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2178 /* Set Normal access mode */
2180 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2181 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2182 if (retval
!= ERROR_OK
)
2185 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2186 /* Write X0 with value 'address' using write procedure */
2187 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2188 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2189 retval
= dpm
->instr_write_data_dcc_64(dpm
,
2190 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
2192 /* Write R0 with value 'address' using write procedure */
2193 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2194 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2195 retval
= dpm
->instr_write_data_dcc(dpm
,
2196 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
2199 if (retval
!= ERROR_OK
)
2202 if (size
== 4 && (address
% 4) == 0)
2203 retval
= aarch64_read_cpu_memory_fast(target
, count
, buffer
, &dscr
);
2205 retval
= aarch64_read_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
2207 if (dscr
& DSCR_MA
) {
2209 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2210 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2213 if (retval
!= ERROR_OK
)
2216 /* Check for sticky abort flags in the DSCR */
2217 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2218 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2219 if (retval
!= ERROR_OK
)
2224 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
2225 /* Abort occurred - clear it and exit */
2226 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
2227 armv8_dpm_handle_exception(dpm
, true);
2235 static int aarch64_read_phys_memory(struct target
*target
,
2236 target_addr_t address
, uint32_t size
,
2237 uint32_t count
, uint8_t *buffer
)
2239 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2241 if (count
&& buffer
) {
2242 /* read memory through APB-AP */
2243 retval
= aarch64_mmu_modify(target
, 0);
2244 if (retval
!= ERROR_OK
)
2246 retval
= aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2251 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
2252 uint32_t size
, uint32_t count
, uint8_t *buffer
)
2254 int mmu_enabled
= 0;
2257 /* determine if MMU was enabled on target stop */
2258 retval
= aarch64_mmu(target
, &mmu_enabled
);
2259 if (retval
!= ERROR_OK
)
2263 /* enable MMU as we could have disabled it for phys access */
2264 retval
= aarch64_mmu_modify(target
, 1);
2265 if (retval
!= ERROR_OK
)
2268 return aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2271 static int aarch64_write_phys_memory(struct target
*target
,
2272 target_addr_t address
, uint32_t size
,
2273 uint32_t count
, const uint8_t *buffer
)
2275 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2277 if (count
&& buffer
) {
2278 /* write memory through APB-AP */
2279 retval
= aarch64_mmu_modify(target
, 0);
2280 if (retval
!= ERROR_OK
)
2282 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2288 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
2289 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
2291 int mmu_enabled
= 0;
2294 /* determine if MMU was enabled on target stop */
2295 retval
= aarch64_mmu(target
, &mmu_enabled
);
2296 if (retval
!= ERROR_OK
)
2300 /* enable MMU as we could have disabled it for phys access */
2301 retval
= aarch64_mmu_modify(target
, 1);
2302 if (retval
!= ERROR_OK
)
2305 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2308 static int aarch64_handle_target_request(void *priv
)
2310 struct target
*target
= priv
;
2311 struct armv8_common
*armv8
= target_to_armv8(target
);
2314 if (!target_was_examined(target
))
2316 if (!target
->dbg_msg_enabled
)
2319 if (target
->state
== TARGET_RUNNING
) {
2322 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2323 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2325 /* check if we have data */
2326 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
2327 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2328 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
2329 if (retval
== ERROR_OK
) {
2330 target_request(target
, request
);
2331 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2332 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2340 static int aarch64_examine_first(struct target
*target
)
2342 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2343 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2344 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2345 struct aarch64_private_config
*pc
= target
->private_config
;
2347 int retval
= ERROR_OK
;
2348 uint64_t debug
, ttypr
;
2350 uint32_t tmp0
, tmp1
, tmp2
, tmp3
;
2351 debug
= ttypr
= cpuid
= 0;
2356 if (pc
->adiv5_config
.ap_num
== DP_APSEL_INVALID
) {
2357 /* Search for the APB-AB */
2358 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
2359 if (retval
!= ERROR_OK
) {
2360 LOG_ERROR("Could not find APB-AP for debug access");
2364 armv8
->debug_ap
= dap_ap(swjdp
, pc
->adiv5_config
.ap_num
);
2367 retval
= mem_ap_init(armv8
->debug_ap
);
2368 if (retval
!= ERROR_OK
) {
2369 LOG_ERROR("Could not initialize the APB-AP");
2373 armv8
->debug_ap
->memaccess_tck
= 10;
2375 if (!target
->dbgbase_set
) {
2377 /* Get ROM Table base */
2379 int32_t coreidx
= target
->coreid
;
2380 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
2381 if (retval
!= ERROR_OK
)
2383 /* Lookup 0x15 -- Processor DAP */
2384 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
2385 &armv8
->debug_base
, &coreidx
);
2386 if (retval
!= ERROR_OK
)
2388 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
2389 " apid: %08" PRIx32
, coreidx
, armv8
->debug_base
, apid
);
2391 armv8
->debug_base
= target
->dbgbase
;
2393 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2394 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
2395 if (retval
!= ERROR_OK
) {
2396 LOG_DEBUG("Examine %s failed", "oslock");
2400 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2401 armv8
->debug_base
+ CPUV8_DBG_MAINID0
, &cpuid
);
2402 if (retval
!= ERROR_OK
) {
2403 LOG_DEBUG("Examine %s failed", "CPUID");
2407 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2408 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
, &tmp0
);
2409 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2410 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
+ 4, &tmp1
);
2411 if (retval
!= ERROR_OK
) {
2412 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2415 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2416 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
, &tmp2
);
2417 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2418 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
+ 4, &tmp3
);
2419 if (retval
!= ERROR_OK
) {
2420 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2424 retval
= dap_run(armv8
->debug_ap
->dap
);
2425 if (retval
!= ERROR_OK
) {
2426 LOG_ERROR("%s: examination failed\n", target_name(target
));
2431 ttypr
= (ttypr
<< 32) | tmp0
;
2433 debug
= (debug
<< 32) | tmp2
;
2435 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
2436 LOG_DEBUG("ttypr = 0x%08" PRIx64
, ttypr
);
2437 LOG_DEBUG("debug = 0x%08" PRIx64
, debug
);
2439 if (pc
->cti
== NULL
)
2442 armv8
->cti
= pc
->cti
;
2444 retval
= aarch64_dpm_setup(aarch64
, debug
);
2445 if (retval
!= ERROR_OK
)
2448 /* Setup Breakpoint Register Pairs */
2449 aarch64
->brp_num
= (uint32_t)((debug
>> 12) & 0x0F) + 1;
2450 aarch64
->brp_num_context
= (uint32_t)((debug
>> 28) & 0x0F) + 1;
2451 aarch64
->brp_num_available
= aarch64
->brp_num
;
2452 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
2453 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
2454 aarch64
->brp_list
[i
].used
= 0;
2455 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
2456 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
2458 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
2459 aarch64
->brp_list
[i
].value
= 0;
2460 aarch64
->brp_list
[i
].control
= 0;
2461 aarch64
->brp_list
[i
].BRPn
= i
;
2464 LOG_DEBUG("Configured %i hw breakpoints", aarch64
->brp_num
);
2466 target
->state
= TARGET_UNKNOWN
;
2467 target
->debug_reason
= DBG_REASON_NOTHALTED
;
2468 aarch64
->isrmasking_mode
= AARCH64_ISRMASK_ON
;
2469 target_set_examined(target
);
2473 static int aarch64_examine(struct target
*target
)
2475 int retval
= ERROR_OK
;
2477 /* don't re-probe hardware after each reset */
2478 if (!target_was_examined(target
))
2479 retval
= aarch64_examine_first(target
);
2481 /* Configure core debug access */
2482 if (retval
== ERROR_OK
)
2483 retval
= aarch64_init_debug_access(target
);
2489 * Cortex-A8 target creation and initialization
2492 static int aarch64_init_target(struct command_context
*cmd_ctx
,
2493 struct target
*target
)
2495 /* examine_first() does a bunch of this */
2496 arm_semihosting_init(target
);
2500 static int aarch64_init_arch_info(struct target
*target
,
2501 struct aarch64_common
*aarch64
, struct adiv5_dap
*dap
)
2503 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2505 /* Setup struct aarch64_common */
2506 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
2507 armv8
->arm
.dap
= dap
;
2509 /* register arch-specific functions */
2510 armv8
->examine_debug_reason
= NULL
;
2511 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
2512 armv8
->pre_restore_context
= NULL
;
2513 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
2515 armv8_init_arch_info(target
, armv8
);
2516 target_register_timer_callback(aarch64_handle_target_request
, 1,
2517 TARGET_TIMER_TYPE_PERIODIC
, target
);
2522 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
2524 struct aarch64_private_config
*pc
= target
->private_config
;
2525 struct aarch64_common
*aarch64
;
2527 if (adiv5_verify_config(&pc
->adiv5_config
) != ERROR_OK
)
2530 aarch64
= calloc(1, sizeof(struct aarch64_common
));
2531 if (aarch64
== NULL
) {
2532 LOG_ERROR("Out of memory");
2536 return aarch64_init_arch_info(target
, aarch64
, pc
->adiv5_config
.dap
);
2539 static void aarch64_deinit_target(struct target
*target
)
2541 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2542 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2543 struct arm_dpm
*dpm
= &armv8
->dpm
;
2545 armv8_free_reg_cache(target
);
2546 free(aarch64
->brp_list
);
2549 free(target
->private_config
);
2553 static int aarch64_mmu(struct target
*target
, int *enabled
)
2555 if (target
->state
!= TARGET_HALTED
) {
2556 LOG_ERROR("%s: target %s not halted", __func__
, target_name(target
));
2557 return ERROR_TARGET_INVALID
;
2560 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
2564 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
2565 target_addr_t
*phys
)
2567 return armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
2571 * private target configuration items
2573 enum aarch64_cfg_param
{
2577 static const Jim_Nvp nvp_config_opts
[] = {
2578 { .name
= "-cti", .value
= CFG_CTI
},
2579 { .name
= NULL
, .value
= -1 }
2582 static int aarch64_jim_configure(struct target
*target
, Jim_GetOptInfo
*goi
)
2584 struct aarch64_private_config
*pc
;
2588 pc
= (struct aarch64_private_config
*)target
->private_config
;
2590 pc
= calloc(1, sizeof(struct aarch64_private_config
));
2591 pc
->adiv5_config
.ap_num
= DP_APSEL_INVALID
;
2592 target
->private_config
= pc
;
2596 * Call adiv5_jim_configure() to parse the common DAP options
2597 * It will return JIM_CONTINUE if it didn't find any known
2598 * options, JIM_OK if it correctly parsed the topmost option
2599 * and JIM_ERR if an error occurred during parameter evaluation.
2600 * For JIM_CONTINUE, we check our own params.
2602 * adiv5_jim_configure() assumes 'private_config' to point to
2603 * 'struct adiv5_private_config'. Override 'private_config'!
2605 target
->private_config
= &pc
->adiv5_config
;
2606 e
= adiv5_jim_configure(target
, goi
);
2607 target
->private_config
= pc
;
2608 if (e
!= JIM_CONTINUE
)
2611 /* parse config or cget options ... */
2612 if (goi
->argc
> 0) {
2613 Jim_SetEmptyResult(goi
->interp
);
2615 /* check first if topmost item is for us */
2616 e
= Jim_Nvp_name2value_obj(goi
->interp
, nvp_config_opts
,
2619 return JIM_CONTINUE
;
2621 e
= Jim_GetOpt_Obj(goi
, NULL
);
2627 if (goi
->isconfigure
) {
2629 struct arm_cti
*cti
;
2630 e
= Jim_GetOpt_Obj(goi
, &o_cti
);
2633 cti
= cti_instance_by_jim_obj(goi
->interp
, o_cti
);
2635 Jim_SetResultString(goi
->interp
, "CTI name invalid!", -1);
2640 if (goi
->argc
!= 0) {
2641 Jim_WrongNumArgs(goi
->interp
,
2642 goi
->argc
, goi
->argv
,
2647 if (pc
== NULL
|| pc
->cti
== NULL
) {
2648 Jim_SetResultString(goi
->interp
, "CTI not configured", -1);
2651 Jim_SetResultString(goi
->interp
, arm_cti_name(pc
->cti
), -1);
2657 return JIM_CONTINUE
;
2664 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
2666 struct target
*target
= get_current_target(CMD_CTX
);
2667 struct armv8_common
*armv8
= target_to_armv8(target
);
2669 return armv8_handle_cache_info_command(CMD
,
2670 &armv8
->armv8_mmu
.armv8_cache
);
2673 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
2675 struct target
*target
= get_current_target(CMD_CTX
);
2676 if (!target_was_examined(target
)) {
2677 LOG_ERROR("target not examined yet");
2681 return aarch64_init_debug_access(target
);
2684 COMMAND_HANDLER(aarch64_handle_disassemble_command
)
2686 struct target
*target
= get_current_target(CMD_CTX
);
2688 if (target
== NULL
) {
2689 LOG_ERROR("No target selected");
2693 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2695 if (aarch64
->common_magic
!= AARCH64_COMMON_MAGIC
) {
2696 command_print(CMD
, "current target isn't an AArch64");
2701 target_addr_t address
;
2705 COMMAND_PARSE_NUMBER(int, CMD_ARGV
[1], count
);
2708 COMMAND_PARSE_ADDRESS(CMD_ARGV
[0], address
);
2711 return ERROR_COMMAND_SYNTAX_ERROR
;
2714 return a64_disassemble(CMD
, target
, address
, count
);
2717 COMMAND_HANDLER(aarch64_mask_interrupts_command
)
2719 struct target
*target
= get_current_target(CMD_CTX
);
2720 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2722 static const Jim_Nvp nvp_maskisr_modes
[] = {
2723 { .name
= "off", .value
= AARCH64_ISRMASK_OFF
},
2724 { .name
= "on", .value
= AARCH64_ISRMASK_ON
},
2725 { .name
= NULL
, .value
= -1 },
2730 n
= Jim_Nvp_name2value_simple(nvp_maskisr_modes
, CMD_ARGV
[0]);
2731 if (n
->name
== NULL
) {
2732 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV
[0]);
2733 return ERROR_COMMAND_SYNTAX_ERROR
;
2736 aarch64
->isrmasking_mode
= n
->value
;
2739 n
= Jim_Nvp_value2name_simple(nvp_maskisr_modes
, aarch64
->isrmasking_mode
);
2740 command_print(CMD
, "aarch64 interrupt mask %s", n
->name
);
2745 static int jim_mcrmrc(Jim_Interp
*interp
, int argc
, Jim_Obj
* const *argv
)
2747 struct command_context
*context
;
2748 struct target
*target
;
2751 bool is_mcr
= false;
2754 if (Jim_CompareStringImmediate(interp
, argv
[0], "mcr")) {
2761 context
= current_command_context(interp
);
2762 assert(context
!= NULL
);
2764 target
= get_current_target(context
);
2765 if (target
== NULL
) {
2766 LOG_ERROR("%s: no current target", __func__
);
2769 if (!target_was_examined(target
)) {
2770 LOG_ERROR("%s: not yet examined", target_name(target
));
2774 arm
= target_to_arm(target
);
2776 LOG_ERROR("%s: not an ARM", target_name(target
));
2780 if (target
->state
!= TARGET_HALTED
)
2781 return ERROR_TARGET_NOT_HALTED
;
2783 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2784 LOG_ERROR("%s: not 32-bit arm target", target_name(target
));
2788 if (argc
!= arg_cnt
) {
2789 LOG_ERROR("%s: wrong number of arguments", __func__
);
2801 /* NOTE: parameter sequence matches ARM instruction set usage:
2802 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
2803 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
2804 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
2806 retval
= Jim_GetLong(interp
, argv
[1], &l
);
2807 if (retval
!= JIM_OK
)
2810 LOG_ERROR("%s: %s %d out of range", __func__
,
2811 "coprocessor", (int) l
);
2816 retval
= Jim_GetLong(interp
, argv
[2], &l
);
2817 if (retval
!= JIM_OK
)
2820 LOG_ERROR("%s: %s %d out of range", __func__
,
2826 retval
= Jim_GetLong(interp
, argv
[3], &l
);
2827 if (retval
!= JIM_OK
)
2830 LOG_ERROR("%s: %s %d out of range", __func__
,
2836 retval
= Jim_GetLong(interp
, argv
[4], &l
);
2837 if (retval
!= JIM_OK
)
2840 LOG_ERROR("%s: %s %d out of range", __func__
,
2846 retval
= Jim_GetLong(interp
, argv
[5], &l
);
2847 if (retval
!= JIM_OK
)
2850 LOG_ERROR("%s: %s %d out of range", __func__
,
2858 if (is_mcr
== true) {
2859 retval
= Jim_GetLong(interp
, argv
[6], &l
);
2860 if (retval
!= JIM_OK
)
2864 /* NOTE: parameters reordered! */
2865 /* ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2) */
2866 retval
= arm
->mcr(target
, cpnum
, op1
, op2
, CRn
, CRm
, value
);
2867 if (retval
!= ERROR_OK
)
2870 /* NOTE: parameters reordered! */
2871 /* ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2) */
2872 retval
= arm
->mrc(target
, cpnum
, op1
, op2
, CRn
, CRm
, &value
);
2873 if (retval
!= ERROR_OK
)
2876 Jim_SetResult(interp
, Jim_NewIntObj(interp
, value
));
2882 static const struct command_registration aarch64_exec_command_handlers
[] = {
2884 .name
= "cache_info",
2885 .handler
= aarch64_handle_cache_info_command
,
2886 .mode
= COMMAND_EXEC
,
2887 .help
= "display information about target caches",
2892 .handler
= aarch64_handle_dbginit_command
,
2893 .mode
= COMMAND_EXEC
,
2894 .help
= "Initialize core debug",
2898 .name
= "disassemble",
2899 .handler
= aarch64_handle_disassemble_command
,
2900 .mode
= COMMAND_EXEC
,
2901 .help
= "Disassemble instructions",
2902 .usage
= "address [count]",
2906 .handler
= aarch64_mask_interrupts_command
,
2907 .mode
= COMMAND_ANY
,
2908 .help
= "mask aarch64 interrupts during single-step",
2909 .usage
= "['on'|'off']",
2913 .mode
= COMMAND_EXEC
,
2914 .jim_handler
= jim_mcrmrc
,
2915 .help
= "write coprocessor register",
2916 .usage
= "cpnum op1 CRn CRm op2 value",
2920 .mode
= COMMAND_EXEC
,
2921 .jim_handler
= jim_mcrmrc
,
2922 .help
= "read coprocessor register",
2923 .usage
= "cpnum op1 CRn CRm op2",
2926 .chain
= smp_command_handlers
,
2930 COMMAND_REGISTRATION_DONE
2933 extern const struct command_registration semihosting_common_handlers
[];
2935 static const struct command_registration aarch64_command_handlers
[] = {
2938 .mode
= COMMAND_ANY
,
2939 .help
= "ARM Command Group",
2941 .chain
= semihosting_common_handlers
2944 .chain
= armv8_command_handlers
,
2948 .mode
= COMMAND_ANY
,
2949 .help
= "Aarch64 command group",
2951 .chain
= aarch64_exec_command_handlers
,
2953 COMMAND_REGISTRATION_DONE
2956 struct target_type aarch64_target
= {
2959 .poll
= aarch64_poll
,
2960 .arch_state
= armv8_arch_state
,
2962 .halt
= aarch64_halt
,
2963 .resume
= aarch64_resume
,
2964 .step
= aarch64_step
,
2966 .assert_reset
= aarch64_assert_reset
,
2967 .deassert_reset
= aarch64_deassert_reset
,
2969 /* REVISIT allow exporting VFP3 registers ... */
2970 .get_gdb_arch
= armv8_get_gdb_arch
,
2971 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
2973 .read_memory
= aarch64_read_memory
,
2974 .write_memory
= aarch64_write_memory
,
2976 .add_breakpoint
= aarch64_add_breakpoint
,
2977 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
2978 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
2979 .remove_breakpoint
= aarch64_remove_breakpoint
,
2980 .add_watchpoint
= NULL
,
2981 .remove_watchpoint
= NULL
,
2983 .commands
= aarch64_command_handlers
,
2984 .target_create
= aarch64_target_create
,
2985 .target_jim_configure
= aarch64_jim_configure
,
2986 .init_target
= aarch64_init_target
,
2987 .deinit_target
= aarch64_deinit_target
,
2988 .examine
= aarch64_examine
,
2990 .read_phys_memory
= aarch64_read_phys_memory
,
2991 .write_phys_memory
= aarch64_write_phys_memory
,
2993 .virt2phys
= aarch64_virt2phys
,
Linking to existing account procedure
If you already have an account and want to add another login method,
you MUST first sign in with your existing account and
then change the URL to read
https://review.openocd.org/login/?link
to get to this page again; this time it will work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)