1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
33 static int aarch64_poll(struct target
*target
);
34 static int aarch64_debug_entry(struct target
*target
);
35 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
36 static int aarch64_set_breakpoint(struct target
*target
,
37 struct breakpoint
*breakpoint
, uint8_t matchmode
);
38 static int aarch64_set_context_breakpoint(struct target
*target
,
39 struct breakpoint
*breakpoint
, uint8_t matchmode
);
40 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
41 struct breakpoint
*breakpoint
);
42 static int aarch64_unset_breakpoint(struct target
*target
,
43 struct breakpoint
*breakpoint
);
44 static int aarch64_mmu(struct target
*target
, int *enabled
);
45 static int aarch64_virt2phys(struct target
*target
,
46 target_addr_t virt
, target_addr_t
*phys
);
47 static int aarch64_read_apb_ap_memory(struct target
*target
,
48 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
50 static int aarch64_restore_system_control_reg(struct target
*target
)
52 enum arm_mode target_mode
= ARM_MODE_ANY
;
53 int retval
= ERROR_OK
;
56 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
57 struct armv8_common
*armv8
= target_to_armv8(target
);
59 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
60 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
61 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
63 switch (armv8
->arm
.core_mode
) {
65 target_mode
= ARMV8_64_EL1H
;
69 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
73 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
77 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
84 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
88 LOG_INFO("cannot read system control register in this mode");
92 if (target_mode
!= ARM_MODE_ANY
)
93 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
95 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
, aarch64
->system_control_reg
);
96 if (retval
!= ERROR_OK
)
99 if (target_mode
!= ARM_MODE_ANY
)
100 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
106 /* check address before aarch64_apb read write access with mmu on
107 * remove apb predictible data abort */
108 static int aarch64_check_address(struct target
*target
, uint32_t address
)
113 /* modify system_control_reg in order to enable or disable mmu for :
114 * - virt2phys address conversion
115 * - read or write memory in phys or virt address */
116 static int aarch64_mmu_modify(struct target
*target
, int enable
)
118 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
119 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
120 int retval
= ERROR_OK
;
124 /* if mmu enabled at target stop and mmu not enable */
125 if (!(aarch64
->system_control_reg
& 0x1U
)) {
126 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
129 if (!(aarch64
->system_control_reg_curr
& 0x1U
))
130 aarch64
->system_control_reg_curr
|= 0x1U
;
132 if (aarch64
->system_control_reg_curr
& 0x4U
) {
133 /* data cache is active */
134 aarch64
->system_control_reg_curr
&= ~0x4U
;
135 /* flush data cache armv8 function to be called */
136 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
137 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
139 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
140 aarch64
->system_control_reg_curr
&= ~0x1U
;
144 switch (armv8
->arm
.core_mode
) {
148 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
152 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
156 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
159 LOG_DEBUG("unknown cpu state 0x%x" PRIx32
, armv8
->arm
.core_state
);
163 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
,
164 aarch64
->system_control_reg_curr
);
169 * Basic debug access, very low level assumes state is saved
171 static int aarch64_init_debug_access(struct target
*target
)
173 struct armv8_common
*armv8
= target_to_armv8(target
);
179 /* Clear Sticky Power Down status Bit in PRSR to enable access to
180 the registers in the Core Power Domain */
181 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
182 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
183 if (retval
!= ERROR_OK
)
187 * Static CTI configuration:
188 * Channel 0 -> trigger outputs HALT request to PE
189 * Channel 1 -> trigger outputs Resume request to PE
190 * Gate all channel trigger events from entering the CTM
194 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
195 armv8
->cti_base
+ CTI_CTR
, 1);
196 /* By default, gate all channel triggers to and from the CTM */
197 if (retval
== ERROR_OK
)
198 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
199 armv8
->cti_base
+ CTI_GATE
, 0);
200 /* output halt requests to PE on channel 0 trigger */
201 if (retval
== ERROR_OK
)
202 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
203 armv8
->cti_base
+ CTI_OUTEN0
, CTI_CHNL(0));
204 /* output restart requests to PE on channel 1 trigger */
205 if (retval
== ERROR_OK
)
206 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
207 armv8
->cti_base
+ CTI_OUTEN1
, CTI_CHNL(1));
208 if (retval
!= ERROR_OK
)
211 /* Resync breakpoint registers */
213 /* Since this is likely called from init or reset, update target state information*/
214 return aarch64_poll(target
);
217 /* Write to memory mapped registers directly with no cache or mmu handling */
218 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
223 struct armv8_common
*armv8
= target_to_armv8(target
);
225 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
230 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint64_t debug
)
232 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
235 dpm
->arm
= &a8
->armv8_common
.arm
;
238 retval
= armv8_dpm_setup(dpm
);
239 if (retval
== ERROR_OK
)
240 retval
= armv8_dpm_initialize(dpm
);
245 static int aarch64_set_dscr_bits(struct target
*target
, unsigned long bit_mask
, unsigned long value
)
247 struct armv8_common
*armv8
= target_to_armv8(target
);
251 int retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
252 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
253 if (ERROR_OK
!= retval
)
259 dscr
|= value
& bit_mask
;
262 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
263 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
267 static struct target
*get_aarch64(struct target
*target
, int32_t coreid
)
269 struct target_list
*head
;
273 while (head
!= (struct target_list
*)NULL
) {
275 if ((curr
->coreid
== coreid
) && (curr
->state
== TARGET_HALTED
))
281 static int aarch64_halt(struct target
*target
);
283 static int aarch64_halt_smp(struct target
*target
)
285 int retval
= ERROR_OK
;
286 struct target_list
*head
= target
->head
;
288 while (head
!= (struct target_list
*)NULL
) {
289 struct target
*curr
= head
->target
;
290 struct armv8_common
*armv8
= target_to_armv8(curr
);
292 /* open the gate for channel 0 to let HALT requests pass to the CTM */
294 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
295 armv8
->cti_base
+ CTI_GATE
, CTI_CHNL(0));
296 if (retval
== ERROR_OK
)
297 retval
= aarch64_set_dscr_bits(curr
, DSCR_HDE
, DSCR_HDE
);
299 if (retval
!= ERROR_OK
)
305 /* halt the target PE */
306 if (retval
== ERROR_OK
)
307 retval
= aarch64_halt(target
);
312 static int update_halt_gdb(struct target
*target
)
315 if (target
->gdb_service
&& target
->gdb_service
->core
[0] == -1) {
316 target
->gdb_service
->target
= target
;
317 target
->gdb_service
->core
[0] = target
->coreid
;
318 retval
+= aarch64_halt_smp(target
);
324 * Cortex-A8 Run control
327 static int aarch64_poll(struct target
*target
)
329 int retval
= ERROR_OK
;
331 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
332 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
333 enum target_state prev_target_state
= target
->state
;
334 /* toggle to another core is done by gdb as follow */
335 /* maint packet J core_id */
337 /* the next polling trigger an halt event sent to gdb */
338 if ((target
->state
== TARGET_HALTED
) && (target
->smp
) &&
339 (target
->gdb_service
) &&
340 (target
->gdb_service
->target
== NULL
)) {
341 target
->gdb_service
->target
=
342 get_aarch64(target
, target
->gdb_service
->core
[1]);
343 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
346 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
347 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
348 if (retval
!= ERROR_OK
)
350 aarch64
->cpudbg_dscr
= dscr
;
352 if (DSCR_RUN_MODE(dscr
) == 0x3) {
353 if (prev_target_state
!= TARGET_HALTED
) {
354 /* We have a halting debug event */
355 LOG_DEBUG("Target %s halted", target_name(target
));
356 target
->state
= TARGET_HALTED
;
357 if ((prev_target_state
== TARGET_RUNNING
)
358 || (prev_target_state
== TARGET_UNKNOWN
)
359 || (prev_target_state
== TARGET_RESET
)) {
360 retval
= aarch64_debug_entry(target
);
361 if (retval
!= ERROR_OK
)
364 retval
= update_halt_gdb(target
);
365 if (retval
!= ERROR_OK
)
368 target_call_event_callbacks(target
,
369 TARGET_EVENT_HALTED
);
371 if (prev_target_state
== TARGET_DEBUG_RUNNING
) {
374 retval
= aarch64_debug_entry(target
);
375 if (retval
!= ERROR_OK
)
378 retval
= update_halt_gdb(target
);
379 if (retval
!= ERROR_OK
)
383 target_call_event_callbacks(target
,
384 TARGET_EVENT_DEBUG_HALTED
);
388 target
->state
= TARGET_RUNNING
;
393 static int aarch64_halt(struct target
*target
)
395 int retval
= ERROR_OK
;
397 struct armv8_common
*armv8
= target_to_armv8(target
);
400 * add HDE in halting debug mode
402 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
403 if (retval
!= ERROR_OK
)
406 /* trigger an event on channel 0, this outputs a halt request to the PE */
407 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
408 armv8
->cti_base
+ CTI_APPPULSE
, CTI_CHNL(0));
409 if (retval
!= ERROR_OK
)
412 long long then
= timeval_ms();
414 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
415 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
416 if (retval
!= ERROR_OK
)
418 if ((dscr
& DSCRV8_HALT_MASK
) != 0)
420 if (timeval_ms() > then
+ 1000) {
421 LOG_ERROR("Timeout waiting for halt");
426 target
->debug_reason
= DBG_REASON_DBGRQ
;
431 static int aarch64_internal_restore(struct target
*target
, int current
,
432 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
434 struct armv8_common
*armv8
= target_to_armv8(target
);
435 struct arm
*arm
= &armv8
->arm
;
439 if (!debug_execution
)
440 target_free_all_working_areas(target
);
442 /* current = 1: continue on current pc, otherwise continue at <address> */
443 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
445 resume_pc
= *address
;
447 *address
= resume_pc
;
449 /* Make sure that the Armv7 gdb thumb fixups does not
450 * kill the return address
452 switch (arm
->core_state
) {
454 resume_pc
&= 0xFFFFFFFC;
456 case ARM_STATE_AARCH64
:
457 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
459 case ARM_STATE_THUMB
:
460 case ARM_STATE_THUMB_EE
:
461 /* When the return address is loaded into PC
462 * bit 0 must be 1 to stay in Thumb state
466 case ARM_STATE_JAZELLE
:
467 LOG_ERROR("How do I resume into Jazelle state??");
470 LOG_DEBUG("resume pc = 0x%016" PRIx64
, resume_pc
);
471 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
475 /* called it now before restoring context because it uses cpu
476 * register r0 for restoring system control register */
477 retval
= aarch64_restore_system_control_reg(target
);
478 if (retval
== ERROR_OK
)
479 retval
= aarch64_restore_context(target
, handle_breakpoints
);
484 static int aarch64_internal_restart(struct target
*target
, bool slave_pe
)
486 struct armv8_common
*armv8
= target_to_armv8(target
);
487 struct arm
*arm
= &armv8
->arm
;
491 * * Restart core and wait for it to be started. Clear ITRen and sticky
492 * * exception flags: see ARMv7 ARM, C5.9.
494 * REVISIT: for single stepping, we probably want to
495 * disable IRQs by default, with optional override...
498 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
499 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
500 if (retval
!= ERROR_OK
)
503 if ((dscr
& DSCR_ITE
) == 0)
504 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
505 if ((dscr
& DSCR_ERR
) != 0)
506 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
508 /* make sure to acknowledge the halt event before resuming */
509 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
510 armv8
->cti_base
+ CTI_INACK
, CTI_TRIG(HALT
));
513 * open the CTI gate for channel 1 so that the restart events
514 * get passed along to all PEs
516 if (retval
== ERROR_OK
)
517 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
518 armv8
->cti_base
+ CTI_GATE
, CTI_CHNL(1));
519 if (retval
!= ERROR_OK
)
523 /* trigger an event on channel 1, generates a restart request to the PE */
524 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
525 armv8
->cti_base
+ CTI_APPPULSE
, CTI_CHNL(1));
526 if (retval
!= ERROR_OK
)
529 long long then
= timeval_ms();
531 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
532 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
533 if (retval
!= ERROR_OK
)
535 if ((dscr
& DSCR_HDE
) != 0)
537 if (timeval_ms() > then
+ 1000) {
538 LOG_ERROR("Timeout waiting for resume");
544 target
->debug_reason
= DBG_REASON_NOTHALTED
;
545 target
->state
= TARGET_RUNNING
;
547 /* registers are now invalid */
548 register_cache_invalidate(arm
->core_cache
);
549 register_cache_invalidate(arm
->core_cache
->next
);
554 static int aarch64_restore_smp(struct target
*target
, int handle_breakpoints
)
557 struct target_list
*head
;
561 while (head
!= (struct target_list
*)NULL
) {
563 if ((curr
!= target
) && (curr
->state
!= TARGET_RUNNING
)) {
564 /* resume current address , not in step mode */
565 retval
+= aarch64_internal_restore(curr
, 1, &address
,
566 handle_breakpoints
, 0);
567 retval
+= aarch64_internal_restart(curr
, true);
575 static int aarch64_resume(struct target
*target
, int current
,
576 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
579 uint64_t addr
= address
;
581 /* dummy resume for smp toggle in order to reduce gdb impact */
582 if ((target
->smp
) && (target
->gdb_service
->core
[1] != -1)) {
583 /* simulate a start and halt of target */
584 target
->gdb_service
->target
= NULL
;
585 target
->gdb_service
->core
[0] = target
->gdb_service
->core
[1];
586 /* fake resume at next poll we play the target core[1], see poll*/
587 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
591 if (target
->state
!= TARGET_HALTED
)
592 return ERROR_TARGET_NOT_HALTED
;
594 aarch64_internal_restore(target
, current
, &addr
, handle_breakpoints
,
597 target
->gdb_service
->core
[0] = -1;
598 retval
= aarch64_restore_smp(target
, handle_breakpoints
);
599 if (retval
!= ERROR_OK
)
602 aarch64_internal_restart(target
, false);
604 if (!debug_execution
) {
605 target
->state
= TARGET_RUNNING
;
606 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
607 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
609 target
->state
= TARGET_DEBUG_RUNNING
;
610 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
611 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
617 static int aarch64_debug_entry(struct target
*target
)
619 int retval
= ERROR_OK
;
620 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
621 struct armv8_common
*armv8
= target_to_armv8(target
);
622 struct arm_dpm
*dpm
= &armv8
->dpm
;
623 enum arm_state core_state
;
625 LOG_DEBUG("%s dscr = 0x%08" PRIx32
, target_name(target
), aarch64
->cpudbg_dscr
);
627 dpm
->dscr
= aarch64
->cpudbg_dscr
;
628 core_state
= armv8_dpm_get_core_state(dpm
);
629 armv8_select_opcodes(armv8
, core_state
== ARM_STATE_AARCH64
);
630 armv8_select_reg_access(armv8
, core_state
== ARM_STATE_AARCH64
);
632 /* make sure to clear all sticky errors */
633 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
634 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
636 /* discard async exceptions */
637 if (retval
== ERROR_OK
)
638 retval
= dpm
->instr_cpsr_sync(dpm
);
640 if (retval
!= ERROR_OK
)
643 /* Examine debug reason */
644 armv8_dpm_report_dscr(dpm
, aarch64
->cpudbg_dscr
);
646 /* save address of instruction that triggered the watchpoint? */
647 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
651 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
652 armv8
->debug_base
+ CPUV8_DBG_WFAR1
,
654 if (retval
!= ERROR_OK
)
658 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
659 armv8
->debug_base
+ CPUV8_DBG_WFAR0
,
661 if (retval
!= ERROR_OK
)
664 armv8_dpm_report_wfar(&armv8
->dpm
, wfar
);
667 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
669 if (retval
== ERROR_OK
&& armv8
->post_debug_entry
)
670 retval
= armv8
->post_debug_entry(target
);
675 static int aarch64_post_debug_entry(struct target
*target
)
677 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
678 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
680 enum arm_mode target_mode
= ARM_MODE_ANY
;
683 switch (armv8
->arm
.core_mode
) {
685 target_mode
= ARMV8_64_EL1H
;
689 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL1
, 0);
693 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL2
, 0);
697 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL3
, 0);
704 instr
= ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
708 LOG_INFO("cannot read system control register in this mode");
712 if (target_mode
!= ARM_MODE_ANY
)
713 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
715 retval
= armv8
->dpm
.instr_read_data_r0(&armv8
->dpm
, instr
, &aarch64
->system_control_reg
);
716 if (retval
!= ERROR_OK
)
719 if (target_mode
!= ARM_MODE_ANY
)
720 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
722 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
723 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
725 if (armv8
->armv8_mmu
.armv8_cache
.info
== -1) {
726 armv8_identify_cache(armv8
);
727 armv8_read_mpidr(armv8
);
730 armv8
->armv8_mmu
.mmu_enabled
=
731 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
732 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
733 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
734 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
735 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
736 aarch64
->curr_mode
= armv8
->arm
.core_mode
;
740 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
741 int handle_breakpoints
)
743 struct armv8_common
*armv8
= target_to_armv8(target
);
747 if (target
->state
!= TARGET_HALTED
) {
748 LOG_WARNING("target not halted");
749 return ERROR_TARGET_NOT_HALTED
;
752 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
753 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
754 if (retval
!= ERROR_OK
)
757 /* make sure EDECR.SS is not set when restoring the register */
760 /* set EDECR.SS to enter hardware step mode */
761 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
762 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (edecr
|0x4));
763 if (retval
!= ERROR_OK
)
766 /* disable interrupts while stepping */
767 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0x3 << 22);
768 if (retval
!= ERROR_OK
)
771 /* resume the target */
772 retval
= aarch64_resume(target
, current
, address
, 0, 0);
773 if (retval
!= ERROR_OK
)
776 long long then
= timeval_ms();
777 while (target
->state
!= TARGET_HALTED
) {
778 retval
= aarch64_poll(target
);
779 if (retval
!= ERROR_OK
)
781 if (timeval_ms() > then
+ 1000) {
782 LOG_ERROR("timeout waiting for target halt");
788 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
789 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
790 if (retval
!= ERROR_OK
)
793 /* restore interrupts */
794 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0);
795 if (retval
!= ERROR_OK
)
801 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
803 struct armv8_common
*armv8
= target_to_armv8(target
);
805 LOG_DEBUG("%s", target_name(target
));
807 if (armv8
->pre_restore_context
)
808 armv8
->pre_restore_context(target
);
810 return armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
814 * Cortex-A8 Breakpoint and watchpoint functions
817 /* Setup hardware Breakpoint Register Pair */
818 static int aarch64_set_breakpoint(struct target
*target
,
819 struct breakpoint
*breakpoint
, uint8_t matchmode
)
824 uint8_t byte_addr_select
= 0x0F;
825 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
826 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
827 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
829 if (breakpoint
->set
) {
830 LOG_WARNING("breakpoint already set");
834 if (breakpoint
->type
== BKPT_HARD
) {
836 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
838 if (brp_i
>= aarch64
->brp_num
) {
839 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
840 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
842 breakpoint
->set
= brp_i
+ 1;
843 if (breakpoint
->length
== 2)
844 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
845 control
= ((matchmode
& 0x7) << 20)
847 | (byte_addr_select
<< 5)
849 brp_list
[brp_i
].used
= 1;
850 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
851 brp_list
[brp_i
].control
= control
;
852 bpt_value
= brp_list
[brp_i
].value
;
854 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
855 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
856 (uint32_t)(bpt_value
& 0xFFFFFFFF));
857 if (retval
!= ERROR_OK
)
859 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
860 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
861 (uint32_t)(bpt_value
>> 32));
862 if (retval
!= ERROR_OK
)
865 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
866 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
867 brp_list
[brp_i
].control
);
868 if (retval
!= ERROR_OK
)
870 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
871 brp_list
[brp_i
].control
,
872 brp_list
[brp_i
].value
);
874 } else if (breakpoint
->type
== BKPT_SOFT
) {
877 buf_set_u32(code
, 0, 32, armv8_opcode(armv8
, ARMV8_OPC_HLT
));
878 retval
= target_read_memory(target
,
879 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
880 breakpoint
->length
, 1,
881 breakpoint
->orig_instr
);
882 if (retval
!= ERROR_OK
)
885 armv8_cache_d_inner_flush_virt(armv8
,
886 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
889 retval
= target_write_memory(target
,
890 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
891 breakpoint
->length
, 1, code
);
892 if (retval
!= ERROR_OK
)
895 armv8_cache_d_inner_flush_virt(armv8
,
896 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
899 armv8_cache_i_inner_inval_virt(armv8
,
900 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
903 breakpoint
->set
= 0x11; /* Any nice value but 0 */
906 /* Ensure that halting debug mode is enable */
907 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
908 if (retval
!= ERROR_OK
) {
909 LOG_DEBUG("Failed to set DSCR.HDE");
916 static int aarch64_set_context_breakpoint(struct target
*target
,
917 struct breakpoint
*breakpoint
, uint8_t matchmode
)
919 int retval
= ERROR_FAIL
;
922 uint8_t byte_addr_select
= 0x0F;
923 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
924 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
925 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
927 if (breakpoint
->set
) {
928 LOG_WARNING("breakpoint already set");
931 /*check available context BRPs*/
932 while ((brp_list
[brp_i
].used
||
933 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
936 if (brp_i
>= aarch64
->brp_num
) {
937 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
941 breakpoint
->set
= brp_i
+ 1;
942 control
= ((matchmode
& 0x7) << 20)
944 | (byte_addr_select
<< 5)
946 brp_list
[brp_i
].used
= 1;
947 brp_list
[brp_i
].value
= (breakpoint
->asid
);
948 brp_list
[brp_i
].control
= control
;
949 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
950 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
951 brp_list
[brp_i
].value
);
952 if (retval
!= ERROR_OK
)
954 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
955 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
956 brp_list
[brp_i
].control
);
957 if (retval
!= ERROR_OK
)
959 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
960 brp_list
[brp_i
].control
,
961 brp_list
[brp_i
].value
);
966 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
968 int retval
= ERROR_FAIL
;
969 int brp_1
= 0; /* holds the contextID pair */
970 int brp_2
= 0; /* holds the IVA pair */
971 uint32_t control_CTX
, control_IVA
;
972 uint8_t CTX_byte_addr_select
= 0x0F;
973 uint8_t IVA_byte_addr_select
= 0x0F;
974 uint8_t CTX_machmode
= 0x03;
975 uint8_t IVA_machmode
= 0x01;
976 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
977 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
978 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
980 if (breakpoint
->set
) {
981 LOG_WARNING("breakpoint already set");
984 /*check available context BRPs*/
985 while ((brp_list
[brp_1
].used
||
986 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
989 printf("brp(CTX) found num: %d\n", brp_1
);
990 if (brp_1
>= aarch64
->brp_num
) {
991 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
995 while ((brp_list
[brp_2
].used
||
996 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
999 printf("brp(IVA) found num: %d\n", brp_2
);
1000 if (brp_2
>= aarch64
->brp_num
) {
1001 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1005 breakpoint
->set
= brp_1
+ 1;
1006 breakpoint
->linked_BRP
= brp_2
;
1007 control_CTX
= ((CTX_machmode
& 0x7) << 20)
1010 | (CTX_byte_addr_select
<< 5)
1012 brp_list
[brp_1
].used
= 1;
1013 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1014 brp_list
[brp_1
].control
= control_CTX
;
1015 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1016 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1017 brp_list
[brp_1
].value
);
1018 if (retval
!= ERROR_OK
)
1020 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1021 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1022 brp_list
[brp_1
].control
);
1023 if (retval
!= ERROR_OK
)
1026 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1029 | (IVA_byte_addr_select
<< 5)
1031 brp_list
[brp_2
].used
= 1;
1032 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1033 brp_list
[brp_2
].control
= control_IVA
;
1034 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1035 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1036 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1037 if (retval
!= ERROR_OK
)
1039 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1040 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].BRPn
,
1041 brp_list
[brp_2
].value
>> 32);
1042 if (retval
!= ERROR_OK
)
1044 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1045 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1046 brp_list
[brp_2
].control
);
1047 if (retval
!= ERROR_OK
)
1053 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1056 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1057 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1058 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1060 if (!breakpoint
->set
) {
1061 LOG_WARNING("breakpoint not set");
1065 if (breakpoint
->type
== BKPT_HARD
) {
1066 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1067 int brp_i
= breakpoint
->set
- 1;
1068 int brp_j
= breakpoint
->linked_BRP
;
1069 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1070 LOG_DEBUG("Invalid BRP number in breakpoint");
1073 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1074 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1075 brp_list
[brp_i
].used
= 0;
1076 brp_list
[brp_i
].value
= 0;
1077 brp_list
[brp_i
].control
= 0;
1078 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1079 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1080 brp_list
[brp_i
].control
);
1081 if (retval
!= ERROR_OK
)
1083 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1084 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1085 (uint32_t)brp_list
[brp_i
].value
);
1086 if (retval
!= ERROR_OK
)
1088 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1089 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1090 (uint32_t)brp_list
[brp_i
].value
);
1091 if (retval
!= ERROR_OK
)
1093 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1094 LOG_DEBUG("Invalid BRP number in breakpoint");
1097 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1098 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1099 brp_list
[brp_j
].used
= 0;
1100 brp_list
[brp_j
].value
= 0;
1101 brp_list
[brp_j
].control
= 0;
1102 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1103 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1104 brp_list
[brp_j
].control
);
1105 if (retval
!= ERROR_OK
)
1107 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1108 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1109 (uint32_t)brp_list
[brp_j
].value
);
1110 if (retval
!= ERROR_OK
)
1112 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1113 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_j
].BRPn
,
1114 (uint32_t)brp_list
[brp_j
].value
);
1115 if (retval
!= ERROR_OK
)
1118 breakpoint
->linked_BRP
= 0;
1119 breakpoint
->set
= 0;
1123 int brp_i
= breakpoint
->set
- 1;
1124 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1125 LOG_DEBUG("Invalid BRP number in breakpoint");
1128 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1129 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1130 brp_list
[brp_i
].used
= 0;
1131 brp_list
[brp_i
].value
= 0;
1132 brp_list
[brp_i
].control
= 0;
1133 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1134 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1135 brp_list
[brp_i
].control
);
1136 if (retval
!= ERROR_OK
)
1138 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1139 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1140 brp_list
[brp_i
].value
);
1141 if (retval
!= ERROR_OK
)
1144 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1145 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1146 (uint32_t)brp_list
[brp_i
].value
);
1147 if (retval
!= ERROR_OK
)
1149 breakpoint
->set
= 0;
1153 /* restore original instruction (kept in target endianness) */
1155 armv8_cache_d_inner_flush_virt(armv8
,
1156 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1157 breakpoint
->length
);
1159 if (breakpoint
->length
== 4) {
1160 retval
= target_write_memory(target
,
1161 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1162 4, 1, breakpoint
->orig_instr
);
1163 if (retval
!= ERROR_OK
)
1166 retval
= target_write_memory(target
,
1167 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1168 2, 1, breakpoint
->orig_instr
);
1169 if (retval
!= ERROR_OK
)
1173 armv8_cache_d_inner_flush_virt(armv8
,
1174 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1175 breakpoint
->length
);
1177 armv8_cache_i_inner_inval_virt(armv8
,
1178 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1179 breakpoint
->length
);
1181 breakpoint
->set
= 0;
1186 static int aarch64_add_breakpoint(struct target
*target
,
1187 struct breakpoint
*breakpoint
)
1189 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1191 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1192 LOG_INFO("no hardware breakpoint available");
1193 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1196 if (breakpoint
->type
== BKPT_HARD
)
1197 aarch64
->brp_num_available
--;
1199 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1202 static int aarch64_add_context_breakpoint(struct target
*target
,
1203 struct breakpoint
*breakpoint
)
1205 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1207 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1208 LOG_INFO("no hardware breakpoint available");
1209 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1212 if (breakpoint
->type
== BKPT_HARD
)
1213 aarch64
->brp_num_available
--;
1215 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1218 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1219 struct breakpoint
*breakpoint
)
1221 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1223 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1224 LOG_INFO("no hardware breakpoint available");
1225 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1228 if (breakpoint
->type
== BKPT_HARD
)
1229 aarch64
->brp_num_available
--;
1231 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
/*
 * Remove a previously installed breakpoint and, for hardware
 * breakpoints, return its BRP slot to the available pool.
 */
static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

#if 0
/* It is perfectly possible to remove breakpoints while the target is running */
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}
#endif

	if (breakpoint->set) {
		/* NOTE(review): return value of unset ignored; removal proceeds
		 * even if the debug-register write failed */
		aarch64_unset_breakpoint(target, breakpoint);
		if (breakpoint->type == BKPT_HARD)
			aarch64->brp_num_available++;
	}

	return ERROR_OK;
}
1257 * Cortex-A8 Reset functions
/*
 * Assert reset on the target. Prefers a board/target-specific
 * RESET_ASSERT event handler; otherwise falls back to SRST when the
 * adapter supports it. Invalidates the cached register state since the
 * core's registers are unknown after reset.
 */
static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		jtag_add_reset(0, 1);
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target))
		register_cache_invalidate(armv8->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
/*
 * Deassert reset. Polls the target to learn its post-reset state and,
 * when reset_halt was requested but the core ran, issues a halt.
 */
static int aarch64_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	jtag_add_reset(0, 0);

	/* nothing more to do for an unexamined target */
	if (!target_was_examined(target))
		return ERROR_OK;

	retval = aarch64_poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
				target_name(target));
			retval = target_halt(target);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return ERROR_OK;
}
1319 static int aarch64_write_apb_ap_memory(struct target
*target
,
1320 uint64_t address
, uint32_t size
,
1321 uint32_t count
, const uint8_t *buffer
)
1323 /* write memory through APB-AP */
1324 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1325 struct armv8_common
*armv8
= target_to_armv8(target
);
1326 struct arm_dpm
*dpm
= &armv8
->dpm
;
1327 struct arm
*arm
= &armv8
->arm
;
1328 int total_bytes
= count
* size
;
1330 int start_byte
= address
& 0x3;
1331 int end_byte
= (address
+ total_bytes
) & 0x3;
1334 uint8_t *tmp_buff
= NULL
;
1336 if (target
->state
!= TARGET_HALTED
) {
1337 LOG_WARNING("target not halted");
1338 return ERROR_TARGET_NOT_HALTED
;
1341 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1343 /* Mark register R0 as dirty, as it will be used
1344 * for transferring the data.
1345 * It will be restored automatically when exiting
1348 reg
= armv8_reg_current(arm
, 1);
1351 reg
= armv8_reg_current(arm
, 0);
1354 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1356 /* The algorithm only copies 32 bit words, so the buffer
1357 * should be expanded to include the words at either end.
1358 * The first and last words will be read first to avoid
1359 * corruption if needed.
1361 tmp_buff
= malloc(total_u32
* 4);
1363 if ((start_byte
!= 0) && (total_u32
> 1)) {
1364 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1365 * the other bytes in the word.
1367 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3), 4, 1, tmp_buff
);
1368 if (retval
!= ERROR_OK
)
1369 goto error_free_buff_w
;
1372 /* If end of write is not aligned, or the write is less than 4 bytes */
1373 if ((end_byte
!= 0) ||
1374 ((total_u32
== 1) && (total_bytes
!= 4))) {
1376 /* Read the last word to avoid corruption during 32 bit write */
1377 int mem_offset
= (total_u32
-1) * 4;
1378 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3) + mem_offset
, 4, 1, &tmp_buff
[mem_offset
]);
1379 if (retval
!= ERROR_OK
)
1380 goto error_free_buff_w
;
1383 /* Copy the write buffer over the top of the temporary buffer */
1384 memcpy(&tmp_buff
[start_byte
], buffer
, total_bytes
);
1386 /* We now have a 32 bit aligned buffer that can be written */
1389 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1390 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1391 if (retval
!= ERROR_OK
)
1392 goto error_free_buff_w
;
1394 /* Set Normal access mode */
1395 dscr
= (dscr
& ~DSCR_MA
);
1396 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1397 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1399 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1400 /* Write X0 with value 'address' using write procedure */
1401 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1402 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1403 retval
= dpm
->instr_write_data_dcc_64(dpm
,
1404 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
& ~0x3ULL
);
1406 /* Write R0 with value 'address' using write procedure */
1407 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1408 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1409 dpm
->instr_write_data_dcc(dpm
,
1410 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
& ~0x3ULL
);
1413 /* Step 1.d - Change DCC to memory mode */
1414 dscr
= dscr
| DSCR_MA
;
1415 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1416 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1417 if (retval
!= ERROR_OK
)
1418 goto error_unset_dtr_w
;
1421 /* Step 2.a - Do the write */
1422 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
1423 tmp_buff
, 4, total_u32
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
1424 if (retval
!= ERROR_OK
)
1425 goto error_unset_dtr_w
;
1427 /* Step 3.a - Switch DTR mode back to Normal mode */
1428 dscr
= (dscr
& ~DSCR_MA
);
1429 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1430 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1431 if (retval
!= ERROR_OK
)
1432 goto error_unset_dtr_w
;
1434 /* Check for sticky abort flags in the DSCR */
1435 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1436 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1437 if (retval
!= ERROR_OK
)
1438 goto error_free_buff_w
;
1441 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1442 /* Abort occurred - clear it and exit */
1443 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1444 armv8_dpm_handle_exception(dpm
);
1445 goto error_free_buff_w
;
1453 /* Unset DTR mode */
1454 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1455 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1456 dscr
= (dscr
& ~DSCR_MA
);
1457 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1458 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1465 static int aarch64_read_apb_ap_memory(struct target
*target
,
1466 target_addr_t address
, uint32_t size
,
1467 uint32_t count
, uint8_t *buffer
)
1469 /* read memory through APB-AP */
1470 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1471 struct armv8_common
*armv8
= target_to_armv8(target
);
1472 struct arm_dpm
*dpm
= &armv8
->dpm
;
1473 struct arm
*arm
= &armv8
->arm
;
1474 int total_bytes
= count
* size
;
1476 int start_byte
= address
& 0x3;
1477 int end_byte
= (address
+ total_bytes
) & 0x3;
1480 uint8_t *tmp_buff
= NULL
;
1484 if (target
->state
!= TARGET_HALTED
) {
1485 LOG_WARNING("target not halted");
1486 return ERROR_TARGET_NOT_HALTED
;
1489 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1490 /* Mark register X0, X1 as dirty, as it will be used
1491 * for transferring the data.
1492 * It will be restored automatically when exiting
1495 reg
= armv8_reg_current(arm
, 1);
1498 reg
= armv8_reg_current(arm
, 0);
1502 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1503 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1505 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1507 /* Set Normal access mode */
1508 dscr
= (dscr
& ~DSCR_MA
);
1509 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1510 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1512 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1513 /* Write X0 with value 'address' using write procedure */
1514 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1515 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1516 retval
+= dpm
->instr_write_data_dcc_64(dpm
,
1517 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
& ~0x3ULL
);
1518 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1519 retval
+= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0));
1520 /* Step 1.e - Change DCC to memory mode */
1521 dscr
= dscr
| DSCR_MA
;
1522 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1523 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1524 /* Step 1.f - read DBGDTRTX and discard the value */
1525 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1526 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1528 /* Write R0 with value 'address' using write procedure */
1529 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
1530 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1531 retval
+= dpm
->instr_write_data_dcc(dpm
,
1532 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
& ~0x3ULL
);
1533 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1534 retval
+= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1535 /* Step 1.e - Change DCC to memory mode */
1536 dscr
= dscr
| DSCR_MA
;
1537 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1538 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1539 /* Step 1.f - read DBGDTRTX and discard the value */
1540 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1541 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1544 if (retval
!= ERROR_OK
)
1545 goto error_unset_dtr_r
;
1547 /* Optimize the read as much as we can, either way we read in a single pass */
1548 if ((start_byte
) || (end_byte
)) {
1549 /* The algorithm only copies 32 bit words, so the buffer
1550 * should be expanded to include the words at either end.
1551 * The first and last words will be read into a temp buffer
1552 * to avoid corruption
1554 tmp_buff
= malloc(total_u32
* 4);
1556 goto error_unset_dtr_r
;
1558 /* use the tmp buffer to read the entire data */
1559 u8buf_ptr
= tmp_buff
;
1561 /* address and read length are aligned so read directly into the passed buffer */
1564 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1565 * Abort flags are sticky, so can be read at end of transactions
1567 * This data is read in aligned to 32 bit boundary.
1570 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1571 * increments X0 by 4. */
1572 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, u8buf_ptr
, 4, total_u32
-1,
1573 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
1574 if (retval
!= ERROR_OK
)
1575 goto error_unset_dtr_r
;
1577 /* Step 3.a - set DTR access mode back to Normal mode */
1578 dscr
= (dscr
& ~DSCR_MA
);
1579 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1580 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1581 if (retval
!= ERROR_OK
)
1582 goto error_free_buff_r
;
1584 /* Step 3.b - read DBGDTRTX for the final value */
1585 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1586 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1587 memcpy(u8buf_ptr
+ (total_u32
-1) * 4, &value
, 4);
1589 /* Check for sticky abort flags in the DSCR */
1590 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1591 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1592 if (retval
!= ERROR_OK
)
1593 goto error_free_buff_r
;
1597 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1598 /* Abort occurred - clear it and exit */
1599 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1600 armv8_dpm_handle_exception(dpm
);
1601 goto error_free_buff_r
;
1604 /* check if we need to copy aligned data by applying any shift necessary */
1606 memcpy(buffer
, tmp_buff
+ start_byte
, total_bytes
);
1614 /* Unset DTR mode */
1615 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1616 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1617 dscr
= (dscr
& ~DSCR_MA
);
1618 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1619 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1626 static int aarch64_read_phys_memory(struct target
*target
,
1627 target_addr_t address
, uint32_t size
,
1628 uint32_t count
, uint8_t *buffer
)
1630 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1632 if (count
&& buffer
) {
1633 /* read memory through APB-AP */
1634 retval
= aarch64_mmu_modify(target
, 0);
1635 if (retval
!= ERROR_OK
)
1637 retval
= aarch64_read_apb_ap_memory(target
, address
, size
, count
, buffer
);
/*
 * Read virtual memory. If the MMU was enabled when the target stopped,
 * re-enable it (a previous physical access may have disabled it) and
 * validate the address first; then read through the APB-AP.
 */
static int aarch64_read_memory(struct target *target, target_addr_t address,
	uint32_t size, uint32_t count, uint8_t *buffer)
{
	int mmu_enabled = 0;
	int retval;

	/* determine if MMU was enabled on target stop */
	retval = aarch64_mmu(target, &mmu_enabled);
	if (retval != ERROR_OK)
		return retval;

	if (mmu_enabled) {
		retval = aarch64_check_address(target, address);
		if (retval != ERROR_OK)
			return retval;
		/* enable MMU as we could have disabled it for phys access */
		retval = aarch64_mmu_modify(target, 1);
		if (retval != ERROR_OK)
			return retval;
	}
	return aarch64_read_apb_ap_memory(target, address, size, count, buffer);
}
1665 static int aarch64_write_phys_memory(struct target
*target
,
1666 target_addr_t address
, uint32_t size
,
1667 uint32_t count
, const uint8_t *buffer
)
1669 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1671 if (count
&& buffer
) {
1672 /* write memory through APB-AP */
1673 retval
= aarch64_mmu_modify(target
, 0);
1674 if (retval
!= ERROR_OK
)
1676 return aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
/*
 * Write virtual memory. Mirror of aarch64_read_memory(): when the MMU
 * was enabled at stop, re-enable it and validate the address before
 * writing through the APB-AP.
 */
static int aarch64_write_memory(struct target *target, target_addr_t address,
	uint32_t size, uint32_t count, const uint8_t *buffer)
{
	int mmu_enabled = 0;
	int retval;

	/* determine if MMU was enabled on target stop */
	retval = aarch64_mmu(target, &mmu_enabled);
	if (retval != ERROR_OK)
		return retval;

	if (mmu_enabled) {
		retval = aarch64_check_address(target, address);
		if (retval != ERROR_OK)
			return retval;
		/* enable MMU as we could have disabled it for phys access */
		retval = aarch64_mmu_modify(target, 1);
		if (retval != ERROR_OK)
			return retval;
	}
	return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
}
/*
 * Periodic timer callback: drain pending debug-channel messages from
 * the running target. Reads DSCR; while the DTR TX-full flag is set,
 * reads one request word from DTRTX, dispatches it via target_request()
 * and re-reads DSCR to look for more data.
 */
static int aarch64_handle_target_request(void *priv)
{
	struct target *target = priv;
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;

	/* nothing to do for unexamined targets or with messaging disabled */
	if (!target_was_examined(target))
		return ERROR_OK;
	if (!target->dbg_msg_enabled)
		return ERROR_OK;

	if (target->state == TARGET_RUNNING) {
		uint32_t request;
		uint32_t dscr;
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

		/* check if we have data */
		while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DTRTX, &request);
			if (retval == ERROR_OK) {
				target_request(target, request);
				retval = mem_ap_read_atomic_u32(armv8->debug_ap,
						armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			}
		}
	}

	return ERROR_OK;
}
/*
 * First-time probe of the target: initialize the DP, locate the APB-AP
 * and the core's debug base address (from the ROM table unless set via
 * config), unlock debug access, read the ID registers and set up the
 * DPM and breakpoint bookkeeping.
 */
static int aarch64_examine_first(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct adiv5_dap *swjdp = armv8->arm.dap;
	int i;
	int retval = ERROR_OK;
	uint64_t debug, ttypr;
	uint32_t cpuid;
	uint32_t tmp0, tmp1;
	debug = ttypr = cpuid = 0;

	/* We do one extra read to ensure DAP is configured,
	 * we call ahbap_debugport_init(swjdp) instead
	 */
	retval = dap_dp_init(swjdp);
	if (retval != ERROR_OK)
		return retval;

	/* Search for the APB-AB - it is needed for access to debug registers */
	retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not find APB-AP for debug access");
		return retval;
	}

	retval = mem_ap_init(armv8->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not initialize the APB-AP");
		return retval;
	}

	armv8->debug_ap->memaccess_tck = 80;

	if (!target->dbgbase_set) {
		uint32_t dbgbase;
		/* Get ROM Table base */
		uint32_t apid;
		int32_t coreidx = target->coreid;
		retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
		if (retval != ERROR_OK)
			return retval;
		/* Lookup 0x15 -- Processor DAP */
		retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
				&armv8->debug_base, &coreidx);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
				" apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
	} else
		armv8->debug_base = target->dbgbase;

	/* 0xC5ACCE55 is the architected debug lock-access key */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
	if (retval != ERROR_OK) {
		LOG_DEBUG("LOCK debug access fail");
		return retval;
	}

	/* clear the OS lock so debug register access is permitted */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "CPUID");
		return retval;
	}

	/* memory model feature register, two 32-bit halves */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
	retval += mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "Memory Model Type");
		return retval;
	}
	ttypr |= tmp1;
	ttypr = (ttypr << 32) | tmp0;

	/* debug feature register (ID_AA64DFR0_EL1), two 32-bit halves */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
	retval += mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
		return retval;
	}
	debug |= tmp1;
	debug = (debug << 32) | tmp0;

	LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
	LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
	LOG_DEBUG("debug = 0x%08" PRIx64, debug);

	if (target->ctibase == 0) {
		/* assume a v8 rom table layout */
		armv8->cti_base = target->ctibase = armv8->debug_base + 0x10000;
		LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, target->ctibase);
	} else
		armv8->cti_base = target->ctibase;

	armv8->arm.core_type = ARM_MODE_MON;
	retval = aarch64_dpm_setup(aarch64, debug);
	if (retval != ERROR_OK)
		return retval;

	/* Setup Breakpoint Register Pairs: counts come from ID_AA64DFR0_EL1
	 * BRPs[15:12] and CTX_CMPs[31:28] fields (value is count - 1) */
	aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
	aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
	aarch64->brp_num_available = aarch64->brp_num;
	/* NOTE(review): calloc() result is not checked before use below */
	aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
	for (i = 0; i < aarch64->brp_num; i++) {
		aarch64->brp_list[i].used = 0;
		/* context-capable comparators occupy the highest-numbered slots */
		if (i < (aarch64->brp_num - aarch64->brp_num_context))
			aarch64->brp_list[i].type = BRP_NORMAL;
		else
			aarch64->brp_list[i].type = BRP_CONTEXT;
		aarch64->brp_list[i].value = 0;
		aarch64->brp_list[i].control = 0;
		aarch64->brp_list[i].BRPn = i;
	}

	LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);

	target_set_examined(target);
	return ERROR_OK;
}
1870 static int aarch64_examine(struct target
*target
)
1872 int retval
= ERROR_OK
;
1874 /* don't re-probe hardware after each reset */
1875 if (!target_was_examined(target
))
1876 retval
= aarch64_examine_first(target
);
1878 /* Configure core debug access */
1879 if (retval
== ERROR_OK
)
1880 retval
= aarch64_init_debug_access(target
);
1886 * Cortex-A8 target creation and initialization
/*
 * Target-framework init hook. Intentionally minimal: the real probing
 * and setup happen in aarch64_examine_first().
 */
static int aarch64_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
/*
 * Initialize the aarch64_common structure and wire up the arch-specific
 * callbacks, creating a DAP for the TAP when none exists yet. Also
 * registers the periodic debug-message poll callback.
 */
static int aarch64_init_arch_info(struct target *target,
	struct aarch64_common *aarch64, struct jtag_tap *tap)
{
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct adiv5_dap *dap = armv8->arm.dap;

	armv8->arm.dap = dap;

	/* Setup struct aarch64_common */
	aarch64->common_magic = AARCH64_COMMON_MAGIC;
	/* tap has no dap initialized */
	if (!tap->dap) {
		tap->dap = dap_init();

		/* Leave (only) generic DAP stuff for debugport_init() */
		tap->dap->tap = tap;
	}

	armv8->arm.dap = tap->dap;

	aarch64->fast_reg_read = 0;

	/* register arch-specific functions */
	armv8->examine_debug_reason = NULL;

	armv8->post_debug_entry = aarch64_post_debug_entry;

	armv8->pre_restore_context = NULL;

	armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;

	/* REVISIT v7a setup should be in a v7a-specific routine */
	armv8_init_arch_info(target, armv8);
	/* poll for debug-channel messages roughly every ms */
	target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);

	return ERROR_OK;
}
1934 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
1936 struct aarch64_common
*aarch64
= calloc(1, sizeof(struct aarch64_common
));
1938 return aarch64_init_arch_info(target
, aarch64
, target
->tap
);
1941 static int aarch64_mmu(struct target
*target
, int *enabled
)
1943 if (target
->state
!= TARGET_HALTED
) {
1944 LOG_ERROR("%s: target not halted", __func__
);
1945 return ERROR_TARGET_INVALID
;
1948 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
1952 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
1953 target_addr_t
*phys
)
1955 return armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
1958 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
1960 struct target
*target
= get_current_target(CMD_CTX
);
1961 struct armv8_common
*armv8
= target_to_armv8(target
);
1963 return armv8_handle_cache_info_command(CMD_CTX
,
1964 &armv8
->armv8_mmu
.armv8_cache
);
/* "dbginit" command: (re)initialize core debug access on an examined target. */
COMMAND_HANDLER(aarch64_handle_dbginit_command)
{
	struct target *target = get_current_target(CMD_CTX);
	if (!target_was_examined(target)) {
		LOG_ERROR("target not examined yet");
		return ERROR_FAIL;
	}

	return aarch64_init_debug_access(target);
}
/*
 * "smp_off" command: disable SMP handling on every core in the group
 * and point the gdb service back at the invoking target.
 */
COMMAND_HANDLER(aarch64_handle_smp_off_command)
{
	struct target *target = get_current_target(CMD_CTX);
	/* check target is an smp target */
	struct target_list *head;
	struct target *curr;
	head = target->head;
	target->smp = 0;
	if (head != (struct target_list *)NULL) {
		while (head != (struct target_list *)NULL) {
			curr = head->target;
			curr->smp = 0;
			head = head->next;
		}
		/* fixes the target display to the debugger */
		target->gdb_service->target = target;
	}
	return ERROR_OK;
}
/* "smp_on" command: re-enable SMP handling on every core in the group. */
COMMAND_HANDLER(aarch64_handle_smp_on_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct target_list *head;
	struct target *curr;
	head = target->head;
	if (head != (struct target_list *)NULL) {
		target->smp = 1;
		while (head != (struct target_list *)NULL) {
			curr = head->target;
			curr->smp = 1;
			head = head->next;
		}
	}
	return ERROR_OK;
}
/*
 * "smp_gdb" command: with an argument, select which core gdb should be
 * attached to at the next resume; always prints current -> requested.
 */
COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
{
	struct target *target = get_current_target(CMD_CTX);
	int retval = ERROR_OK;
	struct target_list *head;
	head = target->head;
	if (head != (struct target_list *)NULL) {
		if (CMD_ARGC == 1) {
			int coreid = 0;
			COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
			if (ERROR_OK != retval)
				return retval;
			/* core[1] holds the core requested for the next resume */
			target->gdb_service->core[1] = coreid;
		}
		command_print(CMD_CTX, "gdb coreid  %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
			, target->gdb_service->core[1]);
	}
	return ERROR_OK;
}
/* aarch64-specific EXEC-mode commands (cache info, debug init, SMP control). */
static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
	},
	{
		/* NOTE(review): .name not visible in extraction — "dbginit"
		 * inferred from the handler; confirm against upstream */
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
	},
	{	.name = "smp_off",
		.handler = aarch64_handle_smp_off_command,
		.mode = COMMAND_EXEC,
		.help = "Stop smp handling",
	},
	{
		/* NOTE(review): .name inferred from handler — confirm */
		.name = "smp_on",
		.handler = aarch64_handle_smp_on_command,
		.mode = COMMAND_EXEC,
		.help = "Restart smp handling",
	},
	{
		/* NOTE(review): .name inferred from handler — confirm */
		.name = "smp_gdb",
		.handler = aarch64_handle_smp_gdb_command,
		.mode = COMMAND_EXEC,
		.help = "display/fix current core played to gdb",
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command group: chains the generic armv8 commands plus the
 * aarch64-specific EXEC commands above. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = armv8_command_handlers,
	},
	{
		/* NOTE(review): .name not visible in extraction — "aarch64"
		 * assumed; confirm against upstream source */
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/*
 * Target-type descriptor for ARMv8-A (AArch64) cores: binds the
 * generic target framework operations to the implementations above.
 * Watchpoint support is not implemented (NULL hooks).
 */
struct target_type aarch64_target = {
	/* NOTE(review): .name field not visible in extraction — "aarch64"
	 * assumed; confirm against upstream source */
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,

	/* NOTE(review): .mmu hook not visible in extraction but
	 * aarch64_mmu() is defined above — presumably wired here; confirm */
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};
Linking to existing account procedure
If you already have an account and want to add another login method
you
MUST first sign in with your existing account and
then change URL to read
https://review.openocd.org/login/?link
to get to this page again but this time it'll work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)