1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
33 static int aarch64_poll(struct target
*target
);
34 static int aarch64_debug_entry(struct target
*target
);
35 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
36 static int aarch64_set_breakpoint(struct target
*target
,
37 struct breakpoint
*breakpoint
, uint8_t matchmode
);
38 static int aarch64_set_context_breakpoint(struct target
*target
,
39 struct breakpoint
*breakpoint
, uint8_t matchmode
);
40 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
41 struct breakpoint
*breakpoint
);
42 static int aarch64_unset_breakpoint(struct target
*target
,
43 struct breakpoint
*breakpoint
);
44 static int aarch64_mmu(struct target
*target
, int *enabled
);
45 static int aarch64_virt2phys(struct target
*target
,
46 target_addr_t virt
, target_addr_t
*phys
);
47 static int aarch64_read_apb_ap_memory(struct target
*target
,
48 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
50 static int aarch64_restore_system_control_reg(struct target
*target
)
52 enum arm_mode target_mode
= ARM_MODE_ANY
;
53 int retval
= ERROR_OK
;
56 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
57 struct armv8_common
*armv8
= target_to_armv8(target
);
59 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
60 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
61 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
63 switch (armv8
->arm
.core_mode
) {
65 target_mode
= ARMV8_64_EL1H
;
69 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
73 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
77 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
84 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
88 LOG_INFO("cannot read system control register in this mode");
92 if (target_mode
!= ARM_MODE_ANY
)
93 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
95 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
, aarch64
->system_control_reg
);
96 if (retval
!= ERROR_OK
)
99 if (target_mode
!= ARM_MODE_ANY
)
100 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
106 /* check address before aarch64_apb read write access with mmu on
107 * remove apb predictible data abort */
108 static int aarch64_check_address(struct target
*target
, uint32_t address
)
113 /* modify system_control_reg in order to enable or disable mmu for :
114 * - virt2phys address conversion
115 * - read or write memory in phys or virt address */
116 static int aarch64_mmu_modify(struct target
*target
, int enable
)
118 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
119 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
120 int retval
= ERROR_OK
;
124 /* if mmu enabled at target stop and mmu not enable */
125 if (!(aarch64
->system_control_reg
& 0x1U
)) {
126 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
129 if (!(aarch64
->system_control_reg_curr
& 0x1U
))
130 aarch64
->system_control_reg_curr
|= 0x1U
;
132 if (aarch64
->system_control_reg_curr
& 0x4U
) {
133 /* data cache is active */
134 aarch64
->system_control_reg_curr
&= ~0x4U
;
135 /* flush data cache armv8 function to be called */
136 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
137 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
139 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
140 aarch64
->system_control_reg_curr
&= ~0x1U
;
144 switch (armv8
->arm
.core_mode
) {
148 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
152 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
156 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
159 LOG_DEBUG("unknown cpu state 0x%x" PRIx32
, armv8
->arm
.core_state
);
163 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
,
164 aarch64
->system_control_reg_curr
);
169 * Basic debug access, very low level assumes state is saved
171 static int aarch64_init_debug_access(struct target
*target
)
173 struct armv8_common
*armv8
= target_to_armv8(target
);
179 /* Clear Sticky Power Down status Bit in PRSR to enable access to
180 the registers in the Core Power Domain */
181 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
182 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
183 if (retval
!= ERROR_OK
)
187 * Static CTI configuration:
188 * Channel 0 -> trigger outputs HALT request to PE
189 * Channel 1 -> trigger outputs Resume request to PE
190 * Gate all channel trigger events from entering the CTM
194 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
195 armv8
->cti_base
+ CTI_CTR
, 1);
196 /* By default, gate all channel triggers to and from the CTM */
197 if (retval
== ERROR_OK
)
198 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
199 armv8
->cti_base
+ CTI_GATE
, 0);
200 /* output halt requests to PE on channel 0 trigger */
201 if (retval
== ERROR_OK
)
202 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
203 armv8
->cti_base
+ CTI_OUTEN0
, CTI_CHNL(0));
204 /* output restart requests to PE on channel 1 trigger */
205 if (retval
== ERROR_OK
)
206 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
207 armv8
->cti_base
+ CTI_OUTEN1
, CTI_CHNL(1));
208 if (retval
!= ERROR_OK
)
211 /* Resync breakpoint registers */
213 /* Since this is likely called from init or reset, update target state information*/
214 return aarch64_poll(target
);
217 /* Write to memory mapped registers directly with no cache or mmu handling */
218 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
223 struct armv8_common
*armv8
= target_to_armv8(target
);
225 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
230 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint64_t debug
)
232 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
235 dpm
->arm
= &a8
->armv8_common
.arm
;
238 retval
= armv8_dpm_setup(dpm
);
239 if (retval
== ERROR_OK
)
240 retval
= armv8_dpm_initialize(dpm
);
245 static int aarch64_set_dscr_bits(struct target
*target
, unsigned long bit_mask
, unsigned long value
)
247 struct armv8_common
*armv8
= target_to_armv8(target
);
251 int retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
252 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
253 if (ERROR_OK
!= retval
)
259 dscr
|= value
& bit_mask
;
262 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
263 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
267 static struct target
*get_aarch64(struct target
*target
, int32_t coreid
)
269 struct target_list
*head
;
273 while (head
!= (struct target_list
*)NULL
) {
275 if ((curr
->coreid
== coreid
) && (curr
->state
== TARGET_HALTED
))
/* Forward declaration: aarch64_halt_smp below needs it before its definition. */
static int aarch64_halt(struct target *target);
283 static int aarch64_halt_smp(struct target
*target
)
285 int retval
= ERROR_OK
;
286 struct target_list
*head
= target
->head
;
288 while (head
!= (struct target_list
*)NULL
) {
289 struct target
*curr
= head
->target
;
290 struct armv8_common
*armv8
= target_to_armv8(curr
);
292 /* open the gate for channel 0 to let HALT requests pass to the CTM */
294 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
295 armv8
->cti_base
+ CTI_GATE
, CTI_CHNL(0));
296 if (retval
== ERROR_OK
)
297 retval
= aarch64_set_dscr_bits(curr
, DSCR_HDE
, DSCR_HDE
);
299 if (retval
!= ERROR_OK
)
305 /* halt the target PE */
306 if (retval
== ERROR_OK
)
307 retval
= aarch64_halt(target
);
312 static int update_halt_gdb(struct target
*target
)
315 if (target
->gdb_service
&& target
->gdb_service
->core
[0] == -1) {
316 target
->gdb_service
->target
= target
;
317 target
->gdb_service
->core
[0] = target
->coreid
;
318 retval
+= aarch64_halt_smp(target
);
324 * Cortex-A8 Run control
327 static int aarch64_poll(struct target
*target
)
329 int retval
= ERROR_OK
;
331 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
332 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
333 enum target_state prev_target_state
= target
->state
;
334 /* toggle to another core is done by gdb as follow */
335 /* maint packet J core_id */
337 /* the next polling trigger an halt event sent to gdb */
338 if ((target
->state
== TARGET_HALTED
) && (target
->smp
) &&
339 (target
->gdb_service
) &&
340 (target
->gdb_service
->target
== NULL
)) {
341 target
->gdb_service
->target
=
342 get_aarch64(target
, target
->gdb_service
->core
[1]);
343 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
346 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
347 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
348 if (retval
!= ERROR_OK
)
350 aarch64
->cpudbg_dscr
= dscr
;
352 if (DSCR_RUN_MODE(dscr
) == 0x3) {
353 if (prev_target_state
!= TARGET_HALTED
) {
354 /* We have a halting debug event */
355 LOG_DEBUG("Target %s halted", target_name(target
));
356 target
->state
= TARGET_HALTED
;
357 if ((prev_target_state
== TARGET_RUNNING
)
358 || (prev_target_state
== TARGET_UNKNOWN
)
359 || (prev_target_state
== TARGET_RESET
)) {
360 retval
= aarch64_debug_entry(target
);
361 if (retval
!= ERROR_OK
)
364 retval
= update_halt_gdb(target
);
365 if (retval
!= ERROR_OK
)
368 target_call_event_callbacks(target
,
369 TARGET_EVENT_HALTED
);
371 if (prev_target_state
== TARGET_DEBUG_RUNNING
) {
374 retval
= aarch64_debug_entry(target
);
375 if (retval
!= ERROR_OK
)
378 retval
= update_halt_gdb(target
);
379 if (retval
!= ERROR_OK
)
383 target_call_event_callbacks(target
,
384 TARGET_EVENT_DEBUG_HALTED
);
388 target
->state
= TARGET_RUNNING
;
393 static int aarch64_halt(struct target
*target
)
395 int retval
= ERROR_OK
;
397 struct armv8_common
*armv8
= target_to_armv8(target
);
400 * add HDE in halting debug mode
402 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
403 if (retval
!= ERROR_OK
)
406 /* trigger an event on channel 0, this outputs a halt request to the PE */
407 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
408 armv8
->cti_base
+ CTI_APPPULSE
, CTI_CHNL(0));
409 if (retval
!= ERROR_OK
)
412 long long then
= timeval_ms();
414 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
415 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
416 if (retval
!= ERROR_OK
)
418 if ((dscr
& DSCRV8_HALT_MASK
) != 0)
420 if (timeval_ms() > then
+ 1000) {
421 LOG_ERROR("Timeout waiting for halt");
426 target
->debug_reason
= DBG_REASON_DBGRQ
;
431 static int aarch64_internal_restore(struct target
*target
, int current
,
432 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
434 struct armv8_common
*armv8
= target_to_armv8(target
);
435 struct arm
*arm
= &armv8
->arm
;
439 if (!debug_execution
)
440 target_free_all_working_areas(target
);
442 /* current = 1: continue on current pc, otherwise continue at <address> */
443 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
445 resume_pc
= *address
;
447 *address
= resume_pc
;
449 /* Make sure that the Armv7 gdb thumb fixups does not
450 * kill the return address
452 switch (arm
->core_state
) {
454 resume_pc
&= 0xFFFFFFFC;
456 case ARM_STATE_AARCH64
:
457 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
459 case ARM_STATE_THUMB
:
460 case ARM_STATE_THUMB_EE
:
461 /* When the return address is loaded into PC
462 * bit 0 must be 1 to stay in Thumb state
466 case ARM_STATE_JAZELLE
:
467 LOG_ERROR("How do I resume into Jazelle state??");
470 LOG_DEBUG("resume pc = 0x%016" PRIx64
, resume_pc
);
471 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
475 /* called it now before restoring context because it uses cpu
476 * register r0 for restoring system control register */
477 retval
= aarch64_restore_system_control_reg(target
);
478 if (retval
== ERROR_OK
)
479 retval
= aarch64_restore_context(target
, handle_breakpoints
);
484 static int aarch64_internal_restart(struct target
*target
, bool slave_pe
)
486 struct armv8_common
*armv8
= target_to_armv8(target
);
487 struct arm
*arm
= &armv8
->arm
;
491 * * Restart core and wait for it to be started. Clear ITRen and sticky
492 * * exception flags: see ARMv7 ARM, C5.9.
494 * REVISIT: for single stepping, we probably want to
495 * disable IRQs by default, with optional override...
498 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
499 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
500 if (retval
!= ERROR_OK
)
503 if ((dscr
& DSCR_ITE
) == 0)
504 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
505 if ((dscr
& DSCR_ERR
) != 0)
506 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
508 /* make sure to acknowledge the halt event before resuming */
509 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
510 armv8
->cti_base
+ CTI_INACK
, CTI_TRIG(HALT
));
513 * open the CTI gate for channel 1 so that the restart events
514 * get passed along to all PEs
516 if (retval
== ERROR_OK
)
517 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
518 armv8
->cti_base
+ CTI_GATE
, CTI_CHNL(1));
519 if (retval
!= ERROR_OK
)
523 /* trigger an event on channel 1, generates a restart request to the PE */
524 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
525 armv8
->cti_base
+ CTI_APPPULSE
, CTI_CHNL(1));
526 if (retval
!= ERROR_OK
)
529 long long then
= timeval_ms();
531 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
532 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
533 if (retval
!= ERROR_OK
)
535 if ((dscr
& DSCR_HDE
) != 0)
537 if (timeval_ms() > then
+ 1000) {
538 LOG_ERROR("Timeout waiting for resume");
544 target
->debug_reason
= DBG_REASON_NOTHALTED
;
545 target
->state
= TARGET_RUNNING
;
547 /* registers are now invalid */
548 register_cache_invalidate(arm
->core_cache
);
549 register_cache_invalidate(arm
->core_cache
->next
);
554 static int aarch64_restore_smp(struct target
*target
, int handle_breakpoints
)
557 struct target_list
*head
;
561 while (head
!= (struct target_list
*)NULL
) {
563 if ((curr
!= target
) && (curr
->state
!= TARGET_RUNNING
)) {
564 /* resume current address , not in step mode */
565 retval
+= aarch64_internal_restore(curr
, 1, &address
,
566 handle_breakpoints
, 0);
567 retval
+= aarch64_internal_restart(curr
, true);
575 static int aarch64_resume(struct target
*target
, int current
,
576 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
579 uint64_t addr
= address
;
581 /* dummy resume for smp toggle in order to reduce gdb impact */
582 if ((target
->smp
) && (target
->gdb_service
->core
[1] != -1)) {
583 /* simulate a start and halt of target */
584 target
->gdb_service
->target
= NULL
;
585 target
->gdb_service
->core
[0] = target
->gdb_service
->core
[1];
586 /* fake resume at next poll we play the target core[1], see poll*/
587 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
591 if (target
->state
!= TARGET_HALTED
)
592 return ERROR_TARGET_NOT_HALTED
;
594 aarch64_internal_restore(target
, current
, &addr
, handle_breakpoints
,
597 target
->gdb_service
->core
[0] = -1;
598 retval
= aarch64_restore_smp(target
, handle_breakpoints
);
599 if (retval
!= ERROR_OK
)
602 aarch64_internal_restart(target
, false);
604 if (!debug_execution
) {
605 target
->state
= TARGET_RUNNING
;
606 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
607 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
609 target
->state
= TARGET_DEBUG_RUNNING
;
610 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
611 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
617 static int aarch64_debug_entry(struct target
*target
)
619 int retval
= ERROR_OK
;
620 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
621 struct armv8_common
*armv8
= target_to_armv8(target
);
622 struct arm_dpm
*dpm
= &armv8
->dpm
;
623 enum arm_state core_state
;
625 LOG_DEBUG("%s dscr = 0x%08" PRIx32
, target_name(target
), aarch64
->cpudbg_dscr
);
627 dpm
->dscr
= aarch64
->cpudbg_dscr
;
628 core_state
= armv8_dpm_get_core_state(dpm
);
629 armv8_select_opcodes(armv8
, core_state
== ARM_STATE_AARCH64
);
630 armv8_select_reg_access(armv8
, core_state
== ARM_STATE_AARCH64
);
632 /* make sure to clear all sticky errors */
633 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
634 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
636 /* discard async exceptions */
637 if (retval
== ERROR_OK
)
638 retval
= dpm
->instr_cpsr_sync(dpm
);
640 if (retval
!= ERROR_OK
)
643 /* Examine debug reason */
644 armv8_dpm_report_dscr(dpm
, aarch64
->cpudbg_dscr
);
646 /* save address of instruction that triggered the watchpoint? */
647 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
651 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
652 armv8
->debug_base
+ CPUV8_DBG_WFAR1
,
654 if (retval
!= ERROR_OK
)
658 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
659 armv8
->debug_base
+ CPUV8_DBG_WFAR0
,
661 if (retval
!= ERROR_OK
)
664 armv8_dpm_report_wfar(&armv8
->dpm
, wfar
);
667 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
669 if (retval
== ERROR_OK
&& armv8
->post_debug_entry
)
670 retval
= armv8
->post_debug_entry(target
);
675 static int aarch64_post_debug_entry(struct target
*target
)
677 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
678 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
680 enum arm_mode target_mode
= ARM_MODE_ANY
;
683 switch (armv8
->arm
.core_mode
) {
685 target_mode
= ARMV8_64_EL1H
;
689 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL1
, 0);
693 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL2
, 0);
697 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL3
, 0);
704 instr
= ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
708 LOG_INFO("cannot read system control register in this mode");
712 if (target_mode
!= ARM_MODE_ANY
)
713 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
715 retval
= armv8
->dpm
.instr_read_data_r0(&armv8
->dpm
, instr
, &aarch64
->system_control_reg
);
716 if (retval
!= ERROR_OK
)
719 if (target_mode
!= ARM_MODE_ANY
)
720 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
722 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
723 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
725 if (armv8
->armv8_mmu
.armv8_cache
.info
== -1) {
726 armv8_identify_cache(armv8
);
727 armv8_read_mpidr(armv8
);
730 armv8
->armv8_mmu
.mmu_enabled
=
731 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
732 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
733 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
734 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
735 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
736 aarch64
->curr_mode
= armv8
->arm
.core_mode
;
740 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
741 int handle_breakpoints
)
743 struct armv8_common
*armv8
= target_to_armv8(target
);
747 if (target
->state
!= TARGET_HALTED
) {
748 LOG_WARNING("target not halted");
749 return ERROR_TARGET_NOT_HALTED
;
752 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
753 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
754 if (retval
!= ERROR_OK
)
757 /* make sure EDECR.SS is not set when restoring the register */
760 /* set EDECR.SS to enter hardware step mode */
761 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
762 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (edecr
|0x4));
763 if (retval
!= ERROR_OK
)
766 /* disable interrupts while stepping */
767 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0x3 << 22);
768 if (retval
!= ERROR_OK
)
771 /* resume the target */
772 retval
= aarch64_resume(target
, current
, address
, 0, 0);
773 if (retval
!= ERROR_OK
)
776 long long then
= timeval_ms();
777 while (target
->state
!= TARGET_HALTED
) {
778 retval
= aarch64_poll(target
);
779 if (retval
!= ERROR_OK
)
781 if (timeval_ms() > then
+ 1000) {
782 LOG_ERROR("timeout waiting for target halt");
788 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
789 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
790 if (retval
!= ERROR_OK
)
793 /* restore interrupts */
794 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0);
795 if (retval
!= ERROR_OK
)
801 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
803 struct armv8_common
*armv8
= target_to_armv8(target
);
805 LOG_DEBUG("%s", target_name(target
));
807 if (armv8
->pre_restore_context
)
808 armv8
->pre_restore_context(target
);
810 return armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
814 * Cortex-A8 Breakpoint and watchpoint functions
817 /* Setup hardware Breakpoint Register Pair */
818 static int aarch64_set_breakpoint(struct target
*target
,
819 struct breakpoint
*breakpoint
, uint8_t matchmode
)
824 uint8_t byte_addr_select
= 0x0F;
825 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
826 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
827 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
829 if (breakpoint
->set
) {
830 LOG_WARNING("breakpoint already set");
834 if (breakpoint
->type
== BKPT_HARD
) {
836 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
838 if (brp_i
>= aarch64
->brp_num
) {
839 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
840 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
842 breakpoint
->set
= brp_i
+ 1;
843 if (breakpoint
->length
== 2)
844 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
845 control
= ((matchmode
& 0x7) << 20)
847 | (byte_addr_select
<< 5)
849 brp_list
[brp_i
].used
= 1;
850 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
851 brp_list
[brp_i
].control
= control
;
852 bpt_value
= brp_list
[brp_i
].value
;
854 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
855 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
856 (uint32_t)(bpt_value
& 0xFFFFFFFF));
857 if (retval
!= ERROR_OK
)
859 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
860 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
861 (uint32_t)(bpt_value
>> 32));
862 if (retval
!= ERROR_OK
)
865 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
866 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
867 brp_list
[brp_i
].control
);
868 if (retval
!= ERROR_OK
)
870 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
871 brp_list
[brp_i
].control
,
872 brp_list
[brp_i
].value
);
874 } else if (breakpoint
->type
== BKPT_SOFT
) {
877 buf_set_u32(code
, 0, 32, armv8_opcode(armv8
, ARMV8_OPC_HLT
));
878 retval
= target_read_memory(target
,
879 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
880 breakpoint
->length
, 1,
881 breakpoint
->orig_instr
);
882 if (retval
!= ERROR_OK
)
885 armv8_cache_d_inner_flush_virt(armv8
,
886 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
889 retval
= target_write_memory(target
,
890 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
891 breakpoint
->length
, 1, code
);
892 if (retval
!= ERROR_OK
)
895 armv8_cache_d_inner_flush_virt(armv8
,
896 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
899 armv8_cache_i_inner_inval_virt(armv8
,
900 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
903 breakpoint
->set
= 0x11; /* Any nice value but 0 */
906 /* Ensure that halting debug mode is enable */
907 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
908 if (retval
!= ERROR_OK
) {
909 LOG_DEBUG("Failed to set DSCR.HDE");
916 static int aarch64_set_context_breakpoint(struct target
*target
,
917 struct breakpoint
*breakpoint
, uint8_t matchmode
)
919 int retval
= ERROR_FAIL
;
922 uint8_t byte_addr_select
= 0x0F;
923 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
924 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
925 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
927 if (breakpoint
->set
) {
928 LOG_WARNING("breakpoint already set");
931 /*check available context BRPs*/
932 while ((brp_list
[brp_i
].used
||
933 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
936 if (brp_i
>= aarch64
->brp_num
) {
937 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
941 breakpoint
->set
= brp_i
+ 1;
942 control
= ((matchmode
& 0x7) << 20)
944 | (byte_addr_select
<< 5)
946 brp_list
[brp_i
].used
= 1;
947 brp_list
[brp_i
].value
= (breakpoint
->asid
);
948 brp_list
[brp_i
].control
= control
;
949 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
950 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
951 brp_list
[brp_i
].value
);
952 if (retval
!= ERROR_OK
)
954 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
955 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
956 brp_list
[brp_i
].control
);
957 if (retval
!= ERROR_OK
)
959 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
960 brp_list
[brp_i
].control
,
961 brp_list
[brp_i
].value
);
966 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
968 int retval
= ERROR_FAIL
;
969 int brp_1
= 0; /* holds the contextID pair */
970 int brp_2
= 0; /* holds the IVA pair */
971 uint32_t control_CTX
, control_IVA
;
972 uint8_t CTX_byte_addr_select
= 0x0F;
973 uint8_t IVA_byte_addr_select
= 0x0F;
974 uint8_t CTX_machmode
= 0x03;
975 uint8_t IVA_machmode
= 0x01;
976 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
977 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
978 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
980 if (breakpoint
->set
) {
981 LOG_WARNING("breakpoint already set");
984 /*check available context BRPs*/
985 while ((brp_list
[brp_1
].used
||
986 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
989 printf("brp(CTX) found num: %d\n", brp_1
);
990 if (brp_1
>= aarch64
->brp_num
) {
991 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
995 while ((brp_list
[brp_2
].used
||
996 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
999 printf("brp(IVA) found num: %d\n", brp_2
);
1000 if (brp_2
>= aarch64
->brp_num
) {
1001 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1005 breakpoint
->set
= brp_1
+ 1;
1006 breakpoint
->linked_BRP
= brp_2
;
1007 control_CTX
= ((CTX_machmode
& 0x7) << 20)
1010 | (CTX_byte_addr_select
<< 5)
1012 brp_list
[brp_1
].used
= 1;
1013 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1014 brp_list
[brp_1
].control
= control_CTX
;
1015 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1016 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1017 brp_list
[brp_1
].value
);
1018 if (retval
!= ERROR_OK
)
1020 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1021 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1022 brp_list
[brp_1
].control
);
1023 if (retval
!= ERROR_OK
)
1026 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1029 | (IVA_byte_addr_select
<< 5)
1031 brp_list
[brp_2
].used
= 1;
1032 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1033 brp_list
[brp_2
].control
= control_IVA
;
1034 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1035 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1036 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1037 if (retval
!= ERROR_OK
)
1039 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1040 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].BRPn
,
1041 brp_list
[brp_2
].value
>> 32);
1042 if (retval
!= ERROR_OK
)
1044 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1045 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1046 brp_list
[brp_2
].control
);
1047 if (retval
!= ERROR_OK
)
1053 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1056 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1057 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1058 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1060 if (!breakpoint
->set
) {
1061 LOG_WARNING("breakpoint not set");
1065 if (breakpoint
->type
== BKPT_HARD
) {
1066 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1067 int brp_i
= breakpoint
->set
- 1;
1068 int brp_j
= breakpoint
->linked_BRP
;
1069 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1070 LOG_DEBUG("Invalid BRP number in breakpoint");
1073 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1074 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1075 brp_list
[brp_i
].used
= 0;
1076 brp_list
[brp_i
].value
= 0;
1077 brp_list
[brp_i
].control
= 0;
1078 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1079 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1080 brp_list
[brp_i
].control
);
1081 if (retval
!= ERROR_OK
)
1083 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1084 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1085 (uint32_t)brp_list
[brp_i
].value
);
1086 if (retval
!= ERROR_OK
)
1088 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1089 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1090 (uint32_t)brp_list
[brp_i
].value
);
1091 if (retval
!= ERROR_OK
)
1093 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1094 LOG_DEBUG("Invalid BRP number in breakpoint");
1097 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1098 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1099 brp_list
[brp_j
].used
= 0;
1100 brp_list
[brp_j
].value
= 0;
1101 brp_list
[brp_j
].control
= 0;
1102 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1103 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1104 brp_list
[brp_j
].control
);
1105 if (retval
!= ERROR_OK
)
1107 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1108 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1109 (uint32_t)brp_list
[brp_j
].value
);
1110 if (retval
!= ERROR_OK
)
1112 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1113 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_j
].BRPn
,
1114 (uint32_t)brp_list
[brp_j
].value
);
1115 if (retval
!= ERROR_OK
)
1118 breakpoint
->linked_BRP
= 0;
1119 breakpoint
->set
= 0;
1123 int brp_i
= breakpoint
->set
- 1;
1124 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1125 LOG_DEBUG("Invalid BRP number in breakpoint");
1128 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1129 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1130 brp_list
[brp_i
].used
= 0;
1131 brp_list
[brp_i
].value
= 0;
1132 brp_list
[brp_i
].control
= 0;
1133 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1134 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1135 brp_list
[brp_i
].control
);
1136 if (retval
!= ERROR_OK
)
1138 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1139 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1140 brp_list
[brp_i
].value
);
1141 if (retval
!= ERROR_OK
)
1144 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1145 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1146 (uint32_t)brp_list
[brp_i
].value
);
1147 if (retval
!= ERROR_OK
)
1149 breakpoint
->set
= 0;
1153 /* restore original instruction (kept in target endianness) */
1155 armv8_cache_d_inner_flush_virt(armv8
,
1156 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1157 breakpoint
->length
);
1159 if (breakpoint
->length
== 4) {
1160 retval
= target_write_memory(target
,
1161 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1162 4, 1, breakpoint
->orig_instr
);
1163 if (retval
!= ERROR_OK
)
1166 retval
= target_write_memory(target
,
1167 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1168 2, 1, breakpoint
->orig_instr
);
1169 if (retval
!= ERROR_OK
)
1173 armv8_cache_d_inner_flush_virt(armv8
,
1174 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1175 breakpoint
->length
);
1177 armv8_cache_i_inner_inval_virt(armv8
,
1178 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1179 breakpoint
->length
);
1181 breakpoint
->set
= 0;
1186 static int aarch64_add_breakpoint(struct target
*target
,
1187 struct breakpoint
*breakpoint
)
1189 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1191 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1192 LOG_INFO("no hardware breakpoint available");
1193 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1196 if (breakpoint
->type
== BKPT_HARD
)
1197 aarch64
->brp_num_available
--;
1199 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1202 static int aarch64_add_context_breakpoint(struct target
*target
,
1203 struct breakpoint
*breakpoint
)
1205 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1207 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1208 LOG_INFO("no hardware breakpoint available");
1209 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1212 if (breakpoint
->type
== BKPT_HARD
)
1213 aarch64
->brp_num_available
--;
1215 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1218 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1219 struct breakpoint
*breakpoint
)
1221 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1223 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1224 LOG_INFO("no hardware breakpoint available");
1225 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1228 if (breakpoint
->type
== BKPT_HARD
)
1229 aarch64
->brp_num_available
--;
1231 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1235 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1237 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1240 /* It is perfectly possible to remove breakpoints while the target is running */
1241 if (target
->state
!= TARGET_HALTED
) {
1242 LOG_WARNING("target not halted");
1243 return ERROR_TARGET_NOT_HALTED
;
1247 if (breakpoint
->set
) {
1248 aarch64_unset_breakpoint(target
, breakpoint
);
1249 if (breakpoint
->type
== BKPT_HARD
)
1250 aarch64
->brp_num_available
++;
1257 * Cortex-A8 Reset functions
1260 static int aarch64_assert_reset(struct target
*target
)
1262 struct armv8_common
*armv8
= target_to_armv8(target
);
1266 /* FIXME when halt is requested, make it work somehow... */
1268 /* Issue some kind of warm reset. */
1269 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1270 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1271 else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1272 /* REVISIT handle "pulls" cases, if there's
1273 * hardware that needs them to work.
1275 jtag_add_reset(0, 1);
1277 LOG_ERROR("%s: how to reset?", target_name(target
));
1281 /* registers are now invalid */
1282 if (target_was_examined(target
))
1283 register_cache_invalidate(armv8
->arm
.core_cache
);
1285 target
->state
= TARGET_RESET
;
1290 static int aarch64_deassert_reset(struct target
*target
)
1296 /* be certain SRST is off */
1297 jtag_add_reset(0, 0);
1299 if (!target_was_examined(target
))
1302 retval
= aarch64_poll(target
);
1303 if (retval
!= ERROR_OK
)
1306 if (target
->reset_halt
) {
1307 if (target
->state
!= TARGET_HALTED
) {
1308 LOG_WARNING("%s: ran after reset and before halt ...",
1309 target_name(target
));
1310 retval
= target_halt(target
);
1311 if (retval
!= ERROR_OK
)
1319 static int aarch64_write_apb_ap_memory(struct target
*target
,
1320 uint64_t address
, uint32_t size
,
1321 uint32_t count
, const uint8_t *buffer
)
1323 /* write memory through APB-AP */
1324 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1325 struct armv8_common
*armv8
= target_to_armv8(target
);
1326 struct arm_dpm
*dpm
= &armv8
->dpm
;
1327 struct arm
*arm
= &armv8
->arm
;
1328 int total_bytes
= count
* size
;
1330 int start_byte
= address
& 0x3;
1331 int end_byte
= (address
+ total_bytes
) & 0x3;
1334 uint8_t *tmp_buff
= NULL
;
1336 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64
" size %" PRIu32
" count %" PRIu32
,
1337 address
, size
, count
);
1339 if (target
->state
!= TARGET_HALTED
) {
1340 LOG_WARNING("target not halted");
1341 return ERROR_TARGET_NOT_HALTED
;
1344 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1346 /* Mark register R0 as dirty, as it will be used
1347 * for transferring the data.
1348 * It will be restored automatically when exiting
1351 reg
= armv8_reg_current(arm
, 1);
1354 reg
= armv8_reg_current(arm
, 0);
1357 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1359 /* The algorithm only copies 32 bit words, so the buffer
1360 * should be expanded to include the words at either end.
1361 * The first and last words will be read first to avoid
1362 * corruption if needed.
1364 tmp_buff
= malloc(total_u32
* 4);
1366 if ((start_byte
!= 0) && (total_u32
> 1)) {
1367 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1368 * the other bytes in the word.
1370 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3), 4, 1, tmp_buff
);
1371 if (retval
!= ERROR_OK
)
1372 goto error_free_buff_w
;
1375 /* If end of write is not aligned, or the write is less than 4 bytes */
1376 if ((end_byte
!= 0) ||
1377 ((total_u32
== 1) && (total_bytes
!= 4))) {
1379 /* Read the last word to avoid corruption during 32 bit write */
1380 int mem_offset
= (total_u32
-1) * 4;
1381 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3) + mem_offset
, 4, 1, &tmp_buff
[mem_offset
]);
1382 if (retval
!= ERROR_OK
)
1383 goto error_free_buff_w
;
1386 /* Copy the write buffer over the top of the temporary buffer */
1387 memcpy(&tmp_buff
[start_byte
], buffer
, total_bytes
);
1389 /* We now have a 32 bit aligned buffer that can be written */
1392 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1393 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1394 if (retval
!= ERROR_OK
)
1395 goto error_free_buff_w
;
1397 /* Set Normal access mode */
1398 dscr
= (dscr
& ~DSCR_MA
);
1399 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1400 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1402 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1403 /* Write X0 with value 'address' using write procedure */
1404 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1405 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1406 retval
= dpm
->instr_write_data_dcc_64(dpm
,
1407 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
& ~0x3ULL
);
1409 /* Write R0 with value 'address' using write procedure */
1410 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1411 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1412 dpm
->instr_write_data_dcc(dpm
,
1413 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
& ~0x3ULL
);
1416 /* Step 1.d - Change DCC to memory mode */
1417 dscr
= dscr
| DSCR_MA
;
1418 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1419 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1420 if (retval
!= ERROR_OK
)
1421 goto error_unset_dtr_w
;
1424 /* Step 2.a - Do the write */
1425 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
1426 tmp_buff
, 4, total_u32
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
1427 if (retval
!= ERROR_OK
)
1428 goto error_unset_dtr_w
;
1430 /* Step 3.a - Switch DTR mode back to Normal mode */
1431 dscr
= (dscr
& ~DSCR_MA
);
1432 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1433 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1434 if (retval
!= ERROR_OK
)
1435 goto error_unset_dtr_w
;
1437 /* Check for sticky abort flags in the DSCR */
1438 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1439 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1440 if (retval
!= ERROR_OK
)
1441 goto error_free_buff_w
;
1444 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1445 /* Abort occurred - clear it and exit */
1446 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1447 armv8_dpm_handle_exception(dpm
);
1448 goto error_free_buff_w
;
1456 /* Unset DTR mode */
1457 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1458 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1459 dscr
= (dscr
& ~DSCR_MA
);
1460 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1461 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1468 static int aarch64_read_apb_ap_memory(struct target
*target
,
1469 target_addr_t address
, uint32_t size
,
1470 uint32_t count
, uint8_t *buffer
)
1472 /* read memory through APB-AP */
1473 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1474 struct armv8_common
*armv8
= target_to_armv8(target
);
1475 struct arm_dpm
*dpm
= &armv8
->dpm
;
1476 struct arm
*arm
= &armv8
->arm
;
1477 int total_bytes
= count
* size
;
1479 int start_byte
= address
& 0x3;
1480 int end_byte
= (address
+ total_bytes
) & 0x3;
1483 uint8_t *tmp_buff
= NULL
;
1487 LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR
" size %" PRIu32
" count %" PRIu32
,
1488 address
, size
, count
);
1490 if (target
->state
!= TARGET_HALTED
) {
1491 LOG_WARNING("target not halted");
1492 return ERROR_TARGET_NOT_HALTED
;
1495 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1496 /* Mark register X0, X1 as dirty, as it will be used
1497 * for transferring the data.
1498 * It will be restored automatically when exiting
1501 reg
= armv8_reg_current(arm
, 1);
1504 reg
= armv8_reg_current(arm
, 0);
1508 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1509 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1511 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1513 /* Set Normal access mode */
1514 dscr
= (dscr
& ~DSCR_MA
);
1515 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1516 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1518 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1519 /* Write X0 with value 'address' using write procedure */
1520 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1521 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1522 retval
+= dpm
->instr_write_data_dcc_64(dpm
,
1523 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
& ~0x3ULL
);
1524 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1525 retval
+= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0));
1526 /* Step 1.e - Change DCC to memory mode */
1527 dscr
= dscr
| DSCR_MA
;
1528 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1529 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1530 /* Step 1.f - read DBGDTRTX and discard the value */
1531 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1532 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1534 /* Write R0 with value 'address' using write procedure */
1535 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
1536 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1537 retval
+= dpm
->instr_write_data_dcc(dpm
,
1538 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
& ~0x3ULL
);
1539 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1540 retval
+= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1541 /* Step 1.e - Change DCC to memory mode */
1542 dscr
= dscr
| DSCR_MA
;
1543 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1544 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1545 /* Step 1.f - read DBGDTRTX and discard the value */
1546 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1547 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1550 if (retval
!= ERROR_OK
)
1551 goto error_unset_dtr_r
;
1553 /* Optimize the read as much as we can, either way we read in a single pass */
1554 if ((start_byte
) || (end_byte
)) {
1555 /* The algorithm only copies 32 bit words, so the buffer
1556 * should be expanded to include the words at either end.
1557 * The first and last words will be read into a temp buffer
1558 * to avoid corruption
1560 tmp_buff
= malloc(total_u32
* 4);
1562 goto error_unset_dtr_r
;
1564 /* use the tmp buffer to read the entire data */
1565 u8buf_ptr
= tmp_buff
;
1567 /* address and read length are aligned so read directly into the passed buffer */
1570 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1571 * Abort flags are sticky, so can be read at end of transactions
1573 * This data is read in aligned to 32 bit boundary.
1576 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1577 * increments X0 by 4. */
1578 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, u8buf_ptr
, 4, total_u32
-1,
1579 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
1580 if (retval
!= ERROR_OK
)
1581 goto error_unset_dtr_r
;
1583 /* Step 3.a - set DTR access mode back to Normal mode */
1584 dscr
= (dscr
& ~DSCR_MA
);
1585 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1586 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1587 if (retval
!= ERROR_OK
)
1588 goto error_free_buff_r
;
1590 /* Step 3.b - read DBGDTRTX for the final value */
1591 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1592 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1593 memcpy(u8buf_ptr
+ (total_u32
-1) * 4, &value
, 4);
1595 /* Check for sticky abort flags in the DSCR */
1596 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1597 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1598 if (retval
!= ERROR_OK
)
1599 goto error_free_buff_r
;
1603 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1604 /* Abort occurred - clear it and exit */
1605 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1606 armv8_dpm_handle_exception(dpm
);
1607 goto error_free_buff_r
;
1610 /* check if we need to copy aligned data by applying any shift necessary */
1612 memcpy(buffer
, tmp_buff
+ start_byte
, total_bytes
);
1620 /* Unset DTR mode */
1621 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1622 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1623 dscr
= (dscr
& ~DSCR_MA
);
1624 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1625 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1632 static int aarch64_read_phys_memory(struct target
*target
,
1633 target_addr_t address
, uint32_t size
,
1634 uint32_t count
, uint8_t *buffer
)
1636 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1637 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
,
1638 address
, size
, count
);
1640 if (count
&& buffer
) {
1641 /* read memory through APB-AP */
1642 retval
= aarch64_mmu_modify(target
, 0);
1643 if (retval
!= ERROR_OK
)
1645 retval
= aarch64_read_apb_ap_memory(target
, address
, size
, count
, buffer
);
1650 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
1651 uint32_t size
, uint32_t count
, uint8_t *buffer
)
1653 int mmu_enabled
= 0;
1656 /* aarch64 handles unaligned memory access */
1657 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
, address
,
1660 /* determine if MMU was enabled on target stop */
1661 retval
= aarch64_mmu(target
, &mmu_enabled
);
1662 if (retval
!= ERROR_OK
)
1666 retval
= aarch64_check_address(target
, address
);
1667 if (retval
!= ERROR_OK
)
1669 /* enable MMU as we could have disabled it for phys access */
1670 retval
= aarch64_mmu_modify(target
, 1);
1671 if (retval
!= ERROR_OK
)
1674 return aarch64_read_apb_ap_memory(target
, address
, size
, count
, buffer
);
1677 static int aarch64_write_phys_memory(struct target
*target
,
1678 target_addr_t address
, uint32_t size
,
1679 uint32_t count
, const uint8_t *buffer
)
1681 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1683 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
, address
,
1686 if (count
&& buffer
) {
1687 /* write memory through APB-AP */
1688 retval
= aarch64_mmu_modify(target
, 0);
1689 if (retval
!= ERROR_OK
)
1691 return aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
1697 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
1698 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
1700 int mmu_enabled
= 0;
1703 /* aarch64 handles unaligned memory access */
1704 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR
"; size %" PRId32
1705 "; count %" PRId32
, address
, size
, count
);
1707 /* determine if MMU was enabled on target stop */
1708 retval
= aarch64_mmu(target
, &mmu_enabled
);
1709 if (retval
!= ERROR_OK
)
1713 retval
= aarch64_check_address(target
, address
);
1714 if (retval
!= ERROR_OK
)
1716 /* enable MMU as we could have disabled it for phys access */
1717 retval
= aarch64_mmu_modify(target
, 1);
1718 if (retval
!= ERROR_OK
)
1721 return aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
1724 static int aarch64_handle_target_request(void *priv
)
1726 struct target
*target
= priv
;
1727 struct armv8_common
*armv8
= target_to_armv8(target
);
1730 if (!target_was_examined(target
))
1732 if (!target
->dbg_msg_enabled
)
1735 if (target
->state
== TARGET_RUNNING
) {
1738 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1739 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1741 /* check if we have data */
1742 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
1743 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1744 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
1745 if (retval
== ERROR_OK
) {
1746 target_request(target
, request
);
1747 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1748 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1756 static int aarch64_examine_first(struct target
*target
)
1758 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1759 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1760 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
1762 int retval
= ERROR_OK
;
1763 uint64_t debug
, ttypr
;
1765 uint32_t tmp0
, tmp1
;
1766 debug
= ttypr
= cpuid
= 0;
1768 /* We do one extra read to ensure DAP is configured,
1769 * we call ahbap_debugport_init(swjdp) instead
1771 retval
= dap_dp_init(swjdp
);
1772 if (retval
!= ERROR_OK
)
1775 /* Search for the APB-AB - it is needed for access to debug registers */
1776 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
1777 if (retval
!= ERROR_OK
) {
1778 LOG_ERROR("Could not find APB-AP for debug access");
1782 retval
= mem_ap_init(armv8
->debug_ap
);
1783 if (retval
!= ERROR_OK
) {
1784 LOG_ERROR("Could not initialize the APB-AP");
1788 armv8
->debug_ap
->memaccess_tck
= 80;
1790 if (!target
->dbgbase_set
) {
1792 /* Get ROM Table base */
1794 int32_t coreidx
= target
->coreid
;
1795 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
1796 if (retval
!= ERROR_OK
)
1798 /* Lookup 0x15 -- Processor DAP */
1799 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
1800 &armv8
->debug_base
, &coreidx
);
1801 if (retval
!= ERROR_OK
)
1803 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
1804 " apid: %08" PRIx32
, coreidx
, armv8
->debug_base
, apid
);
1806 armv8
->debug_base
= target
->dbgbase
;
1808 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1809 armv8
->debug_base
+ CPUV8_DBG_LOCKACCESS
, 0xC5ACCE55);
1810 if (retval
!= ERROR_OK
) {
1811 LOG_DEBUG("LOCK debug access fail");
1815 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1816 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
1817 if (retval
!= ERROR_OK
) {
1818 LOG_DEBUG("Examine %s failed", "oslock");
1822 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1823 armv8
->debug_base
+ CPUV8_DBG_MAINID0
, &cpuid
);
1824 if (retval
!= ERROR_OK
) {
1825 LOG_DEBUG("Examine %s failed", "CPUID");
1829 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1830 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
, &tmp0
);
1831 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1832 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
+ 4, &tmp1
);
1833 if (retval
!= ERROR_OK
) {
1834 LOG_DEBUG("Examine %s failed", "Memory Model Type");
1838 ttypr
= (ttypr
<< 32) | tmp0
;
1840 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1841 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
, &tmp0
);
1842 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1843 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
+ 4, &tmp1
);
1844 if (retval
!= ERROR_OK
) {
1845 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
1849 debug
= (debug
<< 32) | tmp0
;
1851 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
1852 LOG_DEBUG("ttypr = 0x%08" PRIx64
, ttypr
);
1853 LOG_DEBUG("debug = 0x%08" PRIx64
, debug
);
1855 if (target
->ctibase
== 0) {
1856 /* assume a v8 rom table layout */
1857 armv8
->cti_base
= target
->ctibase
= armv8
->debug_base
+ 0x10000;
1858 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32
, target
->ctibase
);
1860 armv8
->cti_base
= target
->ctibase
;
1862 armv8
->arm
.core_type
= ARM_MODE_MON
;
1863 retval
= aarch64_dpm_setup(aarch64
, debug
);
1864 if (retval
!= ERROR_OK
)
1867 /* Setup Breakpoint Register Pairs */
1868 aarch64
->brp_num
= (uint32_t)((debug
>> 12) & 0x0F) + 1;
1869 aarch64
->brp_num_context
= (uint32_t)((debug
>> 28) & 0x0F) + 1;
1870 aarch64
->brp_num_available
= aarch64
->brp_num
;
1871 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
1872 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
1873 aarch64
->brp_list
[i
].used
= 0;
1874 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
1875 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
1877 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
1878 aarch64
->brp_list
[i
].value
= 0;
1879 aarch64
->brp_list
[i
].control
= 0;
1880 aarch64
->brp_list
[i
].BRPn
= i
;
1883 LOG_DEBUG("Configured %i hw breakpoints", aarch64
->brp_num
);
1885 target_set_examined(target
);
1889 static int aarch64_examine(struct target
*target
)
1891 int retval
= ERROR_OK
;
1893 /* don't re-probe hardware after each reset */
1894 if (!target_was_examined(target
))
1895 retval
= aarch64_examine_first(target
);
1897 /* Configure core debug access */
1898 if (retval
== ERROR_OK
)
1899 retval
= aarch64_init_debug_access(target
);
1905 * Cortex-A8 target creation and initialization
1908 static int aarch64_init_target(struct command_context
*cmd_ctx
,
1909 struct target
*target
)
1911 /* examine_first() does a bunch of this */
1915 static int aarch64_init_arch_info(struct target
*target
,
1916 struct aarch64_common
*aarch64
, struct jtag_tap
*tap
)
1918 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1919 struct adiv5_dap
*dap
= armv8
->arm
.dap
;
1921 armv8
->arm
.dap
= dap
;
1923 /* Setup struct aarch64_common */
1924 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
1925 /* tap has no dap initialized */
1927 tap
->dap
= dap_init();
1929 /* Leave (only) generic DAP stuff for debugport_init() */
1930 tap
->dap
->tap
= tap
;
1933 armv8
->arm
.dap
= tap
->dap
;
1935 aarch64
->fast_reg_read
= 0;
1937 /* register arch-specific functions */
1938 armv8
->examine_debug_reason
= NULL
;
1940 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
1942 armv8
->pre_restore_context
= NULL
;
1944 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
1946 /* REVISIT v7a setup should be in a v7a-specific routine */
1947 armv8_init_arch_info(target
, armv8
);
1948 target_register_timer_callback(aarch64_handle_target_request
, 1, 1, target
);
1953 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
1955 struct aarch64_common
*aarch64
= calloc(1, sizeof(struct aarch64_common
));
1957 return aarch64_init_arch_info(target
, aarch64
, target
->tap
);
1960 static int aarch64_mmu(struct target
*target
, int *enabled
)
1962 if (target
->state
!= TARGET_HALTED
) {
1963 LOG_ERROR("%s: target not halted", __func__
);
1964 return ERROR_TARGET_INVALID
;
1967 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
1971 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
1972 target_addr_t
*phys
)
1974 return armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
1977 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
1979 struct target
*target
= get_current_target(CMD_CTX
);
1980 struct armv8_common
*armv8
= target_to_armv8(target
);
1982 return armv8_handle_cache_info_command(CMD_CTX
,
1983 &armv8
->armv8_mmu
.armv8_cache
);
1987 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
1989 struct target
*target
= get_current_target(CMD_CTX
);
1990 if (!target_was_examined(target
)) {
1991 LOG_ERROR("target not examined yet");
1995 return aarch64_init_debug_access(target
);
1997 COMMAND_HANDLER(aarch64_handle_smp_off_command
)
1999 struct target
*target
= get_current_target(CMD_CTX
);
2000 /* check target is an smp target */
2001 struct target_list
*head
;
2002 struct target
*curr
;
2003 head
= target
->head
;
2005 if (head
!= (struct target_list
*)NULL
) {
2006 while (head
!= (struct target_list
*)NULL
) {
2007 curr
= head
->target
;
2011 /* fixes the target display to the debugger */
2012 target
->gdb_service
->target
= target
;
2017 COMMAND_HANDLER(aarch64_handle_smp_on_command
)
2019 struct target
*target
= get_current_target(CMD_CTX
);
2020 struct target_list
*head
;
2021 struct target
*curr
;
2022 head
= target
->head
;
2023 if (head
!= (struct target_list
*)NULL
) {
2025 while (head
!= (struct target_list
*)NULL
) {
2026 curr
= head
->target
;
2034 COMMAND_HANDLER(aarch64_handle_smp_gdb_command
)
2036 struct target
*target
= get_current_target(CMD_CTX
);
2037 int retval
= ERROR_OK
;
2038 struct target_list
*head
;
2039 head
= target
->head
;
2040 if (head
!= (struct target_list
*)NULL
) {
2041 if (CMD_ARGC
== 1) {
2043 COMMAND_PARSE_NUMBER(int, CMD_ARGV
[0], coreid
);
2044 if (ERROR_OK
!= retval
)
2046 target
->gdb_service
->core
[1] = coreid
;
2049 command_print(CMD_CTX
, "gdb coreid %" PRId32
" -> %" PRId32
, target
->gdb_service
->core
[0]
2050 , target
->gdb_service
->core
[1]);
2055 static const struct command_registration aarch64_exec_command_handlers
[] = {
2057 .name
= "cache_info",
2058 .handler
= aarch64_handle_cache_info_command
,
2059 .mode
= COMMAND_EXEC
,
2060 .help
= "display information about target caches",
2065 .handler
= aarch64_handle_dbginit_command
,
2066 .mode
= COMMAND_EXEC
,
2067 .help
= "Initialize core debug",
2070 { .name
= "smp_off",
2071 .handler
= aarch64_handle_smp_off_command
,
2072 .mode
= COMMAND_EXEC
,
2073 .help
= "Stop smp handling",
2078 .handler
= aarch64_handle_smp_on_command
,
2079 .mode
= COMMAND_EXEC
,
2080 .help
= "Restart smp handling",
2085 .handler
= aarch64_handle_smp_gdb_command
,
2086 .mode
= COMMAND_EXEC
,
2087 .help
= "display/fix current core played to gdb",
2092 COMMAND_REGISTRATION_DONE
2094 static const struct command_registration aarch64_command_handlers
[] = {
2096 .chain
= armv8_command_handlers
,
2100 .mode
= COMMAND_ANY
,
2101 .help
= "Cortex-A command group",
2103 .chain
= aarch64_exec_command_handlers
,
2105 COMMAND_REGISTRATION_DONE
2108 struct target_type aarch64_target
= {
2111 .poll
= aarch64_poll
,
2112 .arch_state
= armv8_arch_state
,
2114 .halt
= aarch64_halt
,
2115 .resume
= aarch64_resume
,
2116 .step
= aarch64_step
,
2118 .assert_reset
= aarch64_assert_reset
,
2119 .deassert_reset
= aarch64_deassert_reset
,
2121 /* REVISIT allow exporting VFP3 registers ... */
2122 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
2124 .read_memory
= aarch64_read_memory
,
2125 .write_memory
= aarch64_write_memory
,
2127 .checksum_memory
= arm_checksum_memory
,
2128 .blank_check_memory
= arm_blank_check_memory
,
2130 .run_algorithm
= armv4_5_run_algorithm
,
2132 .add_breakpoint
= aarch64_add_breakpoint
,
2133 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
2134 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
2135 .remove_breakpoint
= aarch64_remove_breakpoint
,
2136 .add_watchpoint
= NULL
,
2137 .remove_watchpoint
= NULL
,
2139 .commands
= aarch64_command_handlers
,
2140 .target_create
= aarch64_target_create
,
2141 .init_target
= aarch64_init_target
,
2142 .examine
= aarch64_examine
,
2144 .read_phys_memory
= aarch64_read_phys_memory
,
2145 .write_phys_memory
= aarch64_write_phys_memory
,
2147 .virt2phys
= aarch64_virt2phys
,
Linking to existing account procedure
If you already have an account and want to add another login method
you
MUST first sign in with your existing account and
then change URL to read
https://review.openocd.org/login/?link
to get to this page again but this time it'll work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)