1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include <helper/time_support.h>
32 static int aarch64_poll(struct target
*target
);
33 static int aarch64_debug_entry(struct target
*target
);
34 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
35 static int aarch64_set_breakpoint(struct target
*target
,
36 struct breakpoint
*breakpoint
, uint8_t matchmode
);
37 static int aarch64_set_context_breakpoint(struct target
*target
,
38 struct breakpoint
*breakpoint
, uint8_t matchmode
);
39 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
40 struct breakpoint
*breakpoint
);
41 static int aarch64_unset_breakpoint(struct target
*target
,
42 struct breakpoint
*breakpoint
);
43 static int aarch64_mmu(struct target
*target
, int *enabled
);
44 static int aarch64_virt2phys(struct target
*target
,
45 target_addr_t virt
, target_addr_t
*phys
);
46 static int aarch64_read_apb_ap_memory(struct target
*target
,
47 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
48 static int aarch64_instr_write_data_r0(struct arm_dpm
*dpm
,
49 uint32_t opcode
, uint32_t data
);
51 static int aarch64_restore_system_control_reg(struct target
*target
)
53 int retval
= ERROR_OK
;
55 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
56 struct armv8_common
*armv8
= target_to_armv8(target
);
58 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
59 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
60 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
62 switch (armv8
->arm
.core_mode
) {
66 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
69 aarch64
->system_control_reg
);
70 if (retval
!= ERROR_OK
)
75 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
78 aarch64
->system_control_reg
);
79 if (retval
!= ERROR_OK
)
84 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
87 aarch64
->system_control_reg
);
88 if (retval
!= ERROR_OK
)
92 LOG_DEBUG("unknow cpu state 0x%x" PRIx32
, armv8
->arm
.core_state
);
98 /* check address before aarch64_apb read write access with mmu on
99 * remove apb predictible data abort */
100 static int aarch64_check_address(struct target
*target
, uint32_t address
)
105 /* modify system_control_reg in order to enable or disable mmu for :
106 * - virt2phys address conversion
107 * - read or write memory in phys or virt address */
108 static int aarch64_mmu_modify(struct target
*target
, int enable
)
110 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
111 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
112 int retval
= ERROR_OK
;
115 /* if mmu enabled at target stop and mmu not enable */
116 if (!(aarch64
->system_control_reg
& 0x1U
)) {
117 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
120 if (!(aarch64
->system_control_reg_curr
& 0x1U
)) {
121 aarch64
->system_control_reg_curr
|= 0x1U
;
122 switch (armv8
->arm
.core_mode
) {
126 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
129 aarch64
->system_control_reg_curr
);
130 if (retval
!= ERROR_OK
)
135 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
138 aarch64
->system_control_reg_curr
);
139 if (retval
!= ERROR_OK
)
144 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
147 aarch64
->system_control_reg_curr
);
148 if (retval
!= ERROR_OK
)
152 LOG_DEBUG("unknow cpu state 0x%x" PRIx32
, armv8
->arm
.core_state
);
156 if (aarch64
->system_control_reg_curr
& 0x4U
) {
157 /* data cache is active */
158 aarch64
->system_control_reg_curr
&= ~0x4U
;
159 /* flush data cache armv7 function to be called */
160 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
161 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
163 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
164 aarch64
->system_control_reg_curr
&= ~0x1U
;
165 switch (armv8
->arm
.core_mode
) {
169 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
172 aarch64
->system_control_reg_curr
);
173 if (retval
!= ERROR_OK
)
178 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
181 aarch64
->system_control_reg_curr
);
182 if (retval
!= ERROR_OK
)
187 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
190 aarch64
->system_control_reg_curr
);
191 if (retval
!= ERROR_OK
)
195 LOG_DEBUG("unknow cpu state 0x%x" PRIx32
, armv8
->arm
.core_state
);
204 * Basic debug access, very low level assumes state is saved
206 static int aarch64_init_debug_access(struct target
*target
)
208 struct armv8_common
*armv8
= target_to_armv8(target
);
214 /* Unlocking the debug registers for modification
215 * The debugport might be uninitialised so try twice */
216 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
217 armv8
->debug_base
+ CPUV8_DBG_LOCKACCESS
, 0xC5ACCE55);
218 if (retval
!= ERROR_OK
) {
220 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
221 armv8
->debug_base
+ CPUV8_DBG_LOCKACCESS
, 0xC5ACCE55);
222 if (retval
== ERROR_OK
)
223 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
225 if (retval
!= ERROR_OK
)
227 /* Clear Sticky Power Down status Bit in PRSR to enable access to
228 the registers in the Core Power Domain */
229 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
230 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
231 if (retval
!= ERROR_OK
)
234 /* Enabling of instruction execution in debug mode is done in debug_entry code */
236 /* Resync breakpoint registers */
238 /* Since this is likely called from init or reset, update target state information*/
239 return aarch64_poll(target
);
242 /* To reduce needless round-trips, pass in a pointer to the current
243 * DSCR value. Initialize it to zero if you just need to know the
244 * value on return from this function; or DSCR_ITE if you
245 * happen to know that no instruction is pending.
247 static int aarch64_exec_opcode(struct target
*target
,
248 uint32_t opcode
, uint32_t *dscr_p
)
252 struct armv8_common
*armv8
= target_to_armv8(target
);
253 dscr
= dscr_p
? *dscr_p
: 0;
255 LOG_DEBUG("exec opcode 0x%08" PRIx32
, opcode
);
257 /* Wait for InstrCompl bit to be set */
258 long long then
= timeval_ms();
259 while ((dscr
& DSCR_ITE
) == 0) {
260 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
261 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
262 if (retval
!= ERROR_OK
) {
263 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32
, opcode
);
266 if (timeval_ms() > then
+ 1000) {
267 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
272 retval
= mem_ap_write_u32(armv8
->debug_ap
,
273 armv8
->debug_base
+ CPUV8_DBG_ITR
, opcode
);
274 if (retval
!= ERROR_OK
)
279 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
280 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
281 if (retval
!= ERROR_OK
) {
282 LOG_ERROR("Could not read DSCR register");
285 if (timeval_ms() > then
+ 1000) {
286 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
289 } while ((dscr
& DSCR_ITE
) == 0); /* Wait for InstrCompl bit to be set */
297 /* Write to memory mapped registers directly with no cache or mmu handling */
298 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
303 struct armv8_common
*armv8
= target_to_armv8(target
);
305 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
311 * AARCH64 implementation of Debug Programmer's Model
313 * NOTE the invariant: these routines return with DSCR_ITE set,
314 * so there's no need to poll for it before executing an instruction.
316 * NOTE that in several of these cases the "stall" mode might be useful.
317 * It'd let us queue a few operations together... prepare/finish might
318 * be the places to enable/disable that mode.
321 static inline struct aarch64_common
*dpm_to_a8(struct arm_dpm
*dpm
)
323 return container_of(dpm
, struct aarch64_common
, armv8_common
.dpm
);
326 static int aarch64_write_dcc(struct armv8_common
*armv8
, uint32_t data
)
328 LOG_DEBUG("write DCC 0x%08" PRIx32
, data
);
329 return mem_ap_write_u32(armv8
->debug_ap
,
330 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
333 static int aarch64_write_dcc_64(struct armv8_common
*armv8
, uint64_t data
)
336 LOG_DEBUG("write DCC Low word0x%08" PRIx32
, (unsigned)data
);
337 LOG_DEBUG("write DCC High word 0x%08" PRIx32
, (unsigned)(data
>> 32));
338 ret
= mem_ap_write_u32(armv8
->debug_ap
,
339 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
340 ret
+= mem_ap_write_u32(armv8
->debug_ap
,
341 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, data
>> 32);
345 static int aarch64_read_dcc(struct armv8_common
*armv8
, uint32_t *data
,
348 uint32_t dscr
= DSCR_ITE
;
354 /* Wait for DTRRXfull */
355 long long then
= timeval_ms();
356 while ((dscr
& DSCR_DTR_TX_FULL
) == 0) {
357 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
358 armv8
->debug_base
+ CPUV8_DBG_DSCR
,
360 if (retval
!= ERROR_OK
)
362 if (timeval_ms() > then
+ 1000) {
363 LOG_ERROR("Timeout waiting for read dcc");
368 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
369 armv8
->debug_base
+ CPUV8_DBG_DTRTX
,
371 if (retval
!= ERROR_OK
)
373 LOG_DEBUG("read DCC 0x%08" PRIx32
, *data
);
381 static int aarch64_read_dcc_64(struct armv8_common
*armv8
, uint64_t *data
,
384 uint32_t dscr
= DSCR_ITE
;
391 /* Wait for DTRRXfull */
392 long long then
= timeval_ms();
393 while ((dscr
& DSCR_DTR_TX_FULL
) == 0) {
394 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
395 armv8
->debug_base
+ CPUV8_DBG_DSCR
,
397 if (retval
!= ERROR_OK
)
399 if (timeval_ms() > then
+ 1000) {
400 LOG_ERROR("Timeout waiting for read dcc");
405 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
406 armv8
->debug_base
+ CPUV8_DBG_DTRTX
,
408 if (retval
!= ERROR_OK
)
411 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
412 armv8
->debug_base
+ CPUV8_DBG_DTRRX
,
414 if (retval
!= ERROR_OK
)
417 *data
= *(uint32_t *)data
| (uint64_t)higher
<< 32;
418 LOG_DEBUG("read DCC 0x%16.16" PRIx64
, *data
);
426 static int aarch64_dpm_prepare(struct arm_dpm
*dpm
)
428 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
432 /* set up invariant: INSTR_COMP is set after ever DPM operation */
433 long long then
= timeval_ms();
435 retval
= mem_ap_read_atomic_u32(a8
->armv8_common
.debug_ap
,
436 a8
->armv8_common
.debug_base
+ CPUV8_DBG_DSCR
,
438 if (retval
!= ERROR_OK
)
440 if ((dscr
& DSCR_ITE
) != 0)
442 if (timeval_ms() > then
+ 1000) {
443 LOG_ERROR("Timeout waiting for dpm prepare");
448 /* this "should never happen" ... */
449 if (dscr
& DSCR_DTR_RX_FULL
) {
450 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32
, dscr
);
452 retval
= mem_ap_read_u32(a8
->armv8_common
.debug_ap
,
453 a8
->armv8_common
.debug_base
+ CPUV8_DBG_DTRRX
, &dscr
);
454 if (retval
!= ERROR_OK
)
457 /* Clear sticky error */
458 retval
= mem_ap_write_u32(a8
->armv8_common
.debug_ap
,
459 a8
->armv8_common
.debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
460 if (retval
!= ERROR_OK
)
467 static int aarch64_dpm_finish(struct arm_dpm
*dpm
)
469 /* REVISIT what could be done here? */
473 static int aarch64_instr_execute(struct arm_dpm
*dpm
,
476 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
477 uint32_t dscr
= DSCR_ITE
;
479 return aarch64_exec_opcode(
480 a8
->armv8_common
.arm
.target
,
485 static int aarch64_instr_write_data_dcc(struct arm_dpm
*dpm
,
486 uint32_t opcode
, uint32_t data
)
488 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
490 uint32_t dscr
= DSCR_ITE
;
492 retval
= aarch64_write_dcc(&a8
->armv8_common
, data
);
493 if (retval
!= ERROR_OK
)
496 return aarch64_exec_opcode(
497 a8
->armv8_common
.arm
.target
,
502 static int aarch64_instr_write_data_dcc_64(struct arm_dpm
*dpm
,
503 uint32_t opcode
, uint64_t data
)
505 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
507 uint32_t dscr
= DSCR_ITE
;
509 retval
= aarch64_write_dcc_64(&a8
->armv8_common
, data
);
510 if (retval
!= ERROR_OK
)
513 return aarch64_exec_opcode(
514 a8
->armv8_common
.arm
.target
,
519 static int aarch64_instr_write_data_r0(struct arm_dpm
*dpm
,
520 uint32_t opcode
, uint32_t data
)
522 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
523 uint32_t dscr
= DSCR_ITE
;
526 retval
= aarch64_write_dcc(&a8
->armv8_common
, data
);
527 if (retval
!= ERROR_OK
)
530 retval
= aarch64_exec_opcode(
531 a8
->armv8_common
.arm
.target
,
534 if (retval
!= ERROR_OK
)
537 /* then the opcode, taking data from R0 */
538 retval
= aarch64_exec_opcode(
539 a8
->armv8_common
.arm
.target
,
546 static int aarch64_instr_write_data_r0_64(struct arm_dpm
*dpm
,
547 uint32_t opcode
, uint64_t data
)
549 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
550 uint32_t dscr
= DSCR_ITE
;
553 retval
= aarch64_write_dcc_64(&a8
->armv8_common
, data
);
554 if (retval
!= ERROR_OK
)
557 retval
= aarch64_exec_opcode(
558 a8
->armv8_common
.arm
.target
,
561 if (retval
!= ERROR_OK
)
564 /* then the opcode, taking data from R0 */
565 retval
= aarch64_exec_opcode(
566 a8
->armv8_common
.arm
.target
,
573 static int aarch64_instr_cpsr_sync(struct arm_dpm
*dpm
)
575 struct target
*target
= dpm
->arm
->target
;
576 uint32_t dscr
= DSCR_ITE
;
578 /* "Prefetch flush" after modifying execution status in CPSR */
579 return aarch64_exec_opcode(target
,
580 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
584 static int aarch64_instr_read_data_dcc(struct arm_dpm
*dpm
,
585 uint32_t opcode
, uint32_t *data
)
587 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
589 uint32_t dscr
= DSCR_ITE
;
591 /* the opcode, writing data to DCC */
592 retval
= aarch64_exec_opcode(
593 a8
->armv8_common
.arm
.target
,
596 if (retval
!= ERROR_OK
)
599 return aarch64_read_dcc(&a8
->armv8_common
, data
, &dscr
);
602 static int aarch64_instr_read_data_dcc_64(struct arm_dpm
*dpm
,
603 uint32_t opcode
, uint64_t *data
)
605 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
607 uint32_t dscr
= DSCR_ITE
;
609 /* the opcode, writing data to DCC */
610 retval
= aarch64_exec_opcode(
611 a8
->armv8_common
.arm
.target
,
614 if (retval
!= ERROR_OK
)
617 return aarch64_read_dcc_64(&a8
->armv8_common
, data
, &dscr
);
620 static int aarch64_instr_read_data_r0(struct arm_dpm
*dpm
,
621 uint32_t opcode
, uint32_t *data
)
623 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
624 uint32_t dscr
= DSCR_ITE
;
627 /* the opcode, writing data to R0 */
628 retval
= aarch64_exec_opcode(
629 a8
->armv8_common
.arm
.target
,
632 if (retval
!= ERROR_OK
)
635 /* write R0 to DCC */
636 retval
= aarch64_exec_opcode(
637 a8
->armv8_common
.arm
.target
,
638 0xd5130400, /* msr dbgdtr_el0, x0 */
640 if (retval
!= ERROR_OK
)
643 return aarch64_read_dcc(&a8
->armv8_common
, data
, &dscr
);
646 static int aarch64_instr_read_data_r0_64(struct arm_dpm
*dpm
,
647 uint32_t opcode
, uint64_t *data
)
649 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
650 uint32_t dscr
= DSCR_ITE
;
653 /* the opcode, writing data to R0 */
654 retval
= aarch64_exec_opcode(
655 a8
->armv8_common
.arm
.target
,
658 if (retval
!= ERROR_OK
)
661 /* write R0 to DCC */
662 retval
= aarch64_exec_opcode(
663 a8
->armv8_common
.arm
.target
,
664 0xd5130400, /* msr dbgdtr_el0, x0 */
666 if (retval
!= ERROR_OK
)
669 return aarch64_read_dcc_64(&a8
->armv8_common
, data
, &dscr
);
672 static int aarch64_bpwp_enable(struct arm_dpm
*dpm
, unsigned index_t
,
673 uint32_t addr
, uint32_t control
)
675 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
676 uint32_t vr
= a8
->armv8_common
.debug_base
;
677 uint32_t cr
= a8
->armv8_common
.debug_base
;
681 case 0 ... 15: /* breakpoints */
682 vr
+= CPUV8_DBG_BVR_BASE
;
683 cr
+= CPUV8_DBG_BCR_BASE
;
685 case 16 ... 31: /* watchpoints */
686 vr
+= CPUV8_DBG_WVR_BASE
;
687 cr
+= CPUV8_DBG_WCR_BASE
;
696 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
697 (unsigned) vr
, (unsigned) cr
);
699 retval
= aarch64_dap_write_memap_register_u32(dpm
->arm
->target
,
701 if (retval
!= ERROR_OK
)
703 retval
= aarch64_dap_write_memap_register_u32(dpm
->arm
->target
,
708 static int aarch64_bpwp_disable(struct arm_dpm
*dpm
, unsigned index_t
)
713 struct aarch64_common
*a
= dpm_to_a8(dpm
);
718 cr
= a
->armv8_common
.debug_base
+ CPUV8_DBG_BCR_BASE
;
721 cr
= a
->armv8_common
.debug_base
+ CPUV8_DBG_WCR_BASE
;
729 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr
);
731 /* clear control register */
732 return aarch64_dap_write_memap_register_u32(dpm
->arm
->target
, cr
, 0);
736 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint32_t debug
)
738 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
741 dpm
->arm
= &a8
->armv8_common
.arm
;
744 dpm
->prepare
= aarch64_dpm_prepare
;
745 dpm
->finish
= aarch64_dpm_finish
;
747 dpm
->instr_execute
= aarch64_instr_execute
;
748 dpm
->instr_write_data_dcc
= aarch64_instr_write_data_dcc
;
749 dpm
->instr_write_data_dcc_64
= aarch64_instr_write_data_dcc_64
;
750 dpm
->instr_write_data_r0
= aarch64_instr_write_data_r0
;
751 dpm
->instr_write_data_r0_64
= aarch64_instr_write_data_r0_64
;
752 dpm
->instr_cpsr_sync
= aarch64_instr_cpsr_sync
;
754 dpm
->instr_read_data_dcc
= aarch64_instr_read_data_dcc
;
755 dpm
->instr_read_data_dcc_64
= aarch64_instr_read_data_dcc_64
;
756 dpm
->instr_read_data_r0
= aarch64_instr_read_data_r0
;
757 dpm
->instr_read_data_r0_64
= aarch64_instr_read_data_r0_64
;
759 dpm
->arm_reg_current
= armv8_reg_current
;
761 dpm
->bpwp_enable
= aarch64_bpwp_enable
;
762 dpm
->bpwp_disable
= aarch64_bpwp_disable
;
764 retval
= armv8_dpm_setup(dpm
);
765 if (retval
== ERROR_OK
)
766 retval
= armv8_dpm_initialize(dpm
);
770 static struct target
*get_aarch64(struct target
*target
, int32_t coreid
)
772 struct target_list
*head
;
776 while (head
!= (struct target_list
*)NULL
) {
778 if ((curr
->coreid
== coreid
) && (curr
->state
== TARGET_HALTED
))
static int aarch64_halt(struct target *target);
786 static int aarch64_halt_smp(struct target
*target
)
789 struct target_list
*head
;
792 while (head
!= (struct target_list
*)NULL
) {
794 if ((curr
!= target
) && (curr
->state
!= TARGET_HALTED
))
795 retval
+= aarch64_halt(curr
);
801 static int update_halt_gdb(struct target
*target
)
804 if (target
->gdb_service
&& target
->gdb_service
->core
[0] == -1) {
805 target
->gdb_service
->target
= target
;
806 target
->gdb_service
->core
[0] = target
->coreid
;
807 retval
+= aarch64_halt_smp(target
);
813 * Cortex-A8 Run control
816 static int aarch64_poll(struct target
*target
)
818 int retval
= ERROR_OK
;
820 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
821 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
822 enum target_state prev_target_state
= target
->state
;
823 /* toggle to another core is done by gdb as follow */
824 /* maint packet J core_id */
826 /* the next polling trigger an halt event sent to gdb */
827 if ((target
->state
== TARGET_HALTED
) && (target
->smp
) &&
828 (target
->gdb_service
) &&
829 (target
->gdb_service
->target
== NULL
)) {
830 target
->gdb_service
->target
=
831 get_aarch64(target
, target
->gdb_service
->core
[1]);
832 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
835 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
836 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
837 if (retval
!= ERROR_OK
)
839 aarch64
->cpudbg_dscr
= dscr
;
841 if (DSCR_RUN_MODE(dscr
) == (DSCR_CORE_HALTED
| DSCR_CORE_RESTARTED
)) {
842 if (prev_target_state
!= TARGET_HALTED
) {
843 /* We have a halting debug event */
844 LOG_DEBUG("Target halted");
845 target
->state
= TARGET_HALTED
;
846 if ((prev_target_state
== TARGET_RUNNING
)
847 || (prev_target_state
== TARGET_UNKNOWN
)
848 || (prev_target_state
== TARGET_RESET
)) {
849 retval
= aarch64_debug_entry(target
);
850 if (retval
!= ERROR_OK
)
853 retval
= update_halt_gdb(target
);
854 if (retval
!= ERROR_OK
)
857 target_call_event_callbacks(target
,
858 TARGET_EVENT_HALTED
);
860 if (prev_target_state
== TARGET_DEBUG_RUNNING
) {
863 retval
= aarch64_debug_entry(target
);
864 if (retval
!= ERROR_OK
)
867 retval
= update_halt_gdb(target
);
868 if (retval
!= ERROR_OK
)
872 target_call_event_callbacks(target
,
873 TARGET_EVENT_DEBUG_HALTED
);
876 } else if (DSCR_RUN_MODE(dscr
) == DSCR_CORE_RESTARTED
)
877 target
->state
= TARGET_RUNNING
;
879 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32
, dscr
);
880 target
->state
= TARGET_UNKNOWN
;
886 static int aarch64_halt(struct target
*target
)
888 int retval
= ERROR_OK
;
890 struct armv8_common
*armv8
= target_to_armv8(target
);
893 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
894 armv8
->cti_base
+ CTI_CTR
, 1);
895 if (retval
!= ERROR_OK
)
898 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
899 armv8
->cti_base
+ CTI_GATE
, 3);
900 if (retval
!= ERROR_OK
)
903 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
904 armv8
->cti_base
+ CTI_OUTEN0
, 1);
905 if (retval
!= ERROR_OK
)
908 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
909 armv8
->cti_base
+ CTI_OUTEN1
, 2);
910 if (retval
!= ERROR_OK
)
914 * add HDE in halting debug mode
916 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
917 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
918 if (retval
!= ERROR_OK
)
921 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
922 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
| DSCR_HDE
);
923 if (retval
!= ERROR_OK
)
926 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
927 armv8
->cti_base
+ CTI_APPPULSE
, 1);
928 if (retval
!= ERROR_OK
)
931 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
932 armv8
->cti_base
+ CTI_INACK
, 1);
933 if (retval
!= ERROR_OK
)
937 long long then
= timeval_ms();
939 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
940 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
941 if (retval
!= ERROR_OK
)
943 if ((dscr
& DSCRV8_HALT_MASK
) != 0)
945 if (timeval_ms() > then
+ 1000) {
946 LOG_ERROR("Timeout waiting for halt");
951 target
->debug_reason
= DBG_REASON_DBGRQ
;
956 static int aarch64_internal_restore(struct target
*target
, int current
,
957 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
959 struct armv8_common
*armv8
= target_to_armv8(target
);
960 struct arm
*arm
= &armv8
->arm
;
964 if (!debug_execution
)
965 target_free_all_working_areas(target
);
967 /* current = 1: continue on current pc, otherwise continue at <address> */
968 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
970 resume_pc
= *address
;
972 *address
= resume_pc
;
974 /* Make sure that the Armv7 gdb thumb fixups does not
975 * kill the return address
977 switch (arm
->core_state
) {
979 resume_pc
&= 0xFFFFFFFC;
981 case ARM_STATE_AARCH64
:
982 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
984 case ARM_STATE_THUMB
:
985 case ARM_STATE_THUMB_EE
:
986 /* When the return address is loaded into PC
987 * bit 0 must be 1 to stay in Thumb state
991 case ARM_STATE_JAZELLE
:
992 LOG_ERROR("How do I resume into Jazelle state??");
995 LOG_DEBUG("resume pc = 0x%16" PRIx64
, resume_pc
);
996 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
999 dpmv8_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
1001 /* called it now before restoring context because it uses cpu
1002 * register r0 for restoring system control register */
1003 retval
= aarch64_restore_system_control_reg(target
);
1004 if (retval
!= ERROR_OK
)
1006 retval
= aarch64_restore_context(target
, handle_breakpoints
);
1007 if (retval
!= ERROR_OK
)
1009 target
->debug_reason
= DBG_REASON_NOTHALTED
;
1010 target
->state
= TARGET_RUNNING
;
1012 /* registers are now invalid */
1013 register_cache_invalidate(arm
->core_cache
);
1016 /* the front-end may request us not to handle breakpoints */
1017 if (handle_breakpoints
) {
1018 /* Single step past breakpoint at current address */
1019 breakpoint
= breakpoint_find(target
, resume_pc
);
1021 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint
->address
);
1022 cortex_m3_unset_breakpoint(target
, breakpoint
);
1023 cortex_m3_single_step_core(target
);
1024 cortex_m3_set_breakpoint(target
, breakpoint
);
1032 static int aarch64_internal_restart(struct target
*target
)
1034 struct armv8_common
*armv8
= target_to_armv8(target
);
1035 struct arm
*arm
= &armv8
->arm
;
1039 * * Restart core and wait for it to be started. Clear ITRen and sticky
1040 * * exception flags: see ARMv7 ARM, C5.9.
1042 * REVISIT: for single stepping, we probably want to
1043 * disable IRQs by default, with optional override...
1046 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1047 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1048 if (retval
!= ERROR_OK
)
1051 if ((dscr
& DSCR_ITE
) == 0)
1052 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
1054 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1055 armv8
->cti_base
+ CTI_APPPULSE
, 2);
1056 if (retval
!= ERROR_OK
)
1059 long long then
= timeval_ms();
1061 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1062 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1063 if (retval
!= ERROR_OK
)
1065 if ((dscr
& DSCR_HDE
) != 0)
1067 if (timeval_ms() > then
+ 1000) {
1068 LOG_ERROR("Timeout waiting for resume");
1073 target
->debug_reason
= DBG_REASON_NOTHALTED
;
1074 target
->state
= TARGET_RUNNING
;
1076 /* registers are now invalid */
1077 register_cache_invalidate(arm
->core_cache
);
1082 static int aarch64_restore_smp(struct target
*target
, int handle_breakpoints
)
1085 struct target_list
*head
;
1086 struct target
*curr
;
1088 head
= target
->head
;
1089 while (head
!= (struct target_list
*)NULL
) {
1090 curr
= head
->target
;
1091 if ((curr
!= target
) && (curr
->state
!= TARGET_RUNNING
)) {
1092 /* resume current address , not in step mode */
1093 retval
+= aarch64_internal_restore(curr
, 1, &address
,
1094 handle_breakpoints
, 0);
1095 retval
+= aarch64_internal_restart(curr
);
1103 static int aarch64_resume(struct target
*target
, int current
,
1104 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
1107 uint64_t addr
= address
;
1109 /* dummy resume for smp toggle in order to reduce gdb impact */
1110 if ((target
->smp
) && (target
->gdb_service
->core
[1] != -1)) {
1111 /* simulate a start and halt of target */
1112 target
->gdb_service
->target
= NULL
;
1113 target
->gdb_service
->core
[0] = target
->gdb_service
->core
[1];
1114 /* fake resume at next poll we play the target core[1], see poll*/
1115 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1118 aarch64_internal_restore(target
, current
, &addr
, handle_breakpoints
,
1121 target
->gdb_service
->core
[0] = -1;
1122 retval
= aarch64_restore_smp(target
, handle_breakpoints
);
1123 if (retval
!= ERROR_OK
)
1126 aarch64_internal_restart(target
);
1128 if (!debug_execution
) {
1129 target
->state
= TARGET_RUNNING
;
1130 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1131 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
1133 target
->state
= TARGET_DEBUG_RUNNING
;
1134 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
1135 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
1141 static int aarch64_debug_entry(struct target
*target
)
1144 int retval
= ERROR_OK
;
1145 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1146 struct armv8_common
*armv8
= target_to_armv8(target
);
1149 LOG_DEBUG("dscr = 0x%08" PRIx32
, aarch64
->cpudbg_dscr
);
1151 /* REVISIT surely we should not re-read DSCR !! */
1152 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1153 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1154 if (retval
!= ERROR_OK
)
1157 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1158 * imprecise data aborts get discarded by issuing a Data
1159 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1162 /* Enable the ITR execution once we are in debug mode */
1163 dscr
|= DSCR_ITR_EN
;
1164 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1165 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1166 if (retval
!= ERROR_OK
)
1169 /* Examine debug reason */
1170 arm_dpm_report_dscr(&armv8
->dpm
, aarch64
->cpudbg_dscr
);
1171 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1172 armv8
->debug_base
+ CPUV8_DBG_EDESR
, &tmp
);
1173 if ((tmp
& 0x7) == 0x4)
1174 target
->debug_reason
= DBG_REASON_SINGLESTEP
;
1176 /* save address of instruction that triggered the watchpoint? */
1177 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
1180 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1181 armv8
->debug_base
+ CPUV8_DBG_WFAR0
,
1183 if (retval
!= ERROR_OK
)
1185 arm_dpm_report_wfar(&armv8
->dpm
, wfar
);
1188 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
1190 if (armv8
->post_debug_entry
) {
1191 retval
= armv8
->post_debug_entry(target
);
1192 if (retval
!= ERROR_OK
)
1199 static int aarch64_post_debug_entry(struct target
*target
)
1201 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1202 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1205 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1206 armv8
->debug_base
+ CPUV8_DBG_DRCR
, 1<<2);
1207 switch (armv8
->arm
.core_mode
) {
1211 retval
= armv8
->arm
.mrs(target
, 3, /*op 0*/
1212 0, 0, /* op1, op2 */
1213 1, 0, /* CRn, CRm */
1214 &aarch64
->system_control_reg
);
1215 if (retval
!= ERROR_OK
)
1220 retval
= armv8
->arm
.mrs(target
, 3, /*op 0*/
1221 4, 0, /* op1, op2 */
1222 1, 0, /* CRn, CRm */
1223 &aarch64
->system_control_reg
);
1224 if (retval
!= ERROR_OK
)
1229 retval
= armv8
->arm
.mrs(target
, 3, /*op 0*/
1230 6, 0, /* op1, op2 */
1231 1, 0, /* CRn, CRm */
1232 &aarch64
->system_control_reg
);
1233 if (retval
!= ERROR_OK
)
1237 LOG_DEBUG("unknow cpu state 0x%x" PRIx32
, armv8
->arm
.core_state
);
1239 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
1240 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
1243 if (armv8
->armv8_mmu
.armv8_cache
.ctype
== -1)
1244 armv8_identify_cache(target
);
1247 armv8
->armv8_mmu
.mmu_enabled
=
1248 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
1249 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
1250 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
1251 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
1252 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
1253 aarch64
->curr_mode
= armv8
->arm
.core_mode
;
1257 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
1258 int handle_breakpoints
)
1260 struct armv8_common
*armv8
= target_to_armv8(target
);
1264 if (target
->state
!= TARGET_HALTED
) {
1265 LOG_WARNING("target not halted");
1266 return ERROR_TARGET_NOT_HALTED
;
1269 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1270 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &tmp
);
1271 if (retval
!= ERROR_OK
)
1274 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1275 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (tmp
|0x4));
1276 if (retval
!= ERROR_OK
)
1279 target
->debug_reason
= DBG_REASON_SINGLESTEP
;
1280 retval
= aarch64_resume(target
, 1, address
, 0, 0);
1281 if (retval
!= ERROR_OK
)
1284 long long then
= timeval_ms();
1285 while (target
->state
!= TARGET_HALTED
) {
1286 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1287 armv8
->debug_base
+ CPUV8_DBG_EDESR
, &tmp
);
1288 LOG_DEBUG("DESR = %#x", tmp
);
1289 retval
= aarch64_poll(target
);
1290 if (retval
!= ERROR_OK
)
1292 if (timeval_ms() > then
+ 1000) {
1293 LOG_ERROR("timeout waiting for target halt");
1298 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1299 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (tmp
&(~0x4)));
1300 if (retval
!= ERROR_OK
)
1303 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
1304 if (target
->state
== TARGET_HALTED
)
1305 LOG_DEBUG("target stepped");
1310 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
1312 struct armv8_common
*armv8
= target_to_armv8(target
);
1316 if (armv8
->pre_restore_context
)
1317 armv8
->pre_restore_context(target
);
1319 return armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
 * AArch64 breakpoint and watchpoint functions
1327 /* Setup hardware Breakpoint Register Pair */
1328 static int aarch64_set_breakpoint(struct target
*target
,
1329 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1334 uint8_t byte_addr_select
= 0x0F;
1335 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1336 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1337 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1340 if (breakpoint
->set
) {
1341 LOG_WARNING("breakpoint already set");
1345 if (breakpoint
->type
== BKPT_HARD
) {
1347 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
1349 if (brp_i
>= aarch64
->brp_num
) {
1350 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1351 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1353 breakpoint
->set
= brp_i
+ 1;
1354 if (breakpoint
->length
== 2)
1355 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1356 control
= ((matchmode
& 0x7) << 20)
1358 | (byte_addr_select
<< 5)
1360 brp_list
[brp_i
].used
= 1;
1361 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1362 brp_list
[brp_i
].control
= control
;
1363 bpt_value
= brp_list
[brp_i
].value
;
1365 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1366 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1367 (uint32_t)(bpt_value
& 0xFFFFFFFF));
1368 if (retval
!= ERROR_OK
)
1370 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1371 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1372 (uint32_t)(bpt_value
>> 32));
1373 if (retval
!= ERROR_OK
)
1376 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1377 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1378 brp_list
[brp_i
].control
);
1379 if (retval
!= ERROR_OK
)
1381 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1382 brp_list
[brp_i
].control
,
1383 brp_list
[brp_i
].value
);
1385 } else if (breakpoint
->type
== BKPT_SOFT
) {
1387 buf_set_u32(code
, 0, 32, 0xD4400000);
1389 retval
= target_read_memory(target
,
1390 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1391 breakpoint
->length
, 1,
1392 breakpoint
->orig_instr
);
1393 if (retval
!= ERROR_OK
)
1395 retval
= target_write_memory(target
,
1396 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1397 breakpoint
->length
, 1, code
);
1398 if (retval
!= ERROR_OK
)
1400 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1403 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1404 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1405 /* Ensure that halting debug mode is enable */
1406 dscr
= dscr
| DSCR_HDE
;
1407 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1408 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1409 if (retval
!= ERROR_OK
) {
1410 LOG_DEBUG("Failed to set DSCR.HDE");
1417 static int aarch64_set_context_breakpoint(struct target
*target
,
1418 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1420 int retval
= ERROR_FAIL
;
1423 uint8_t byte_addr_select
= 0x0F;
1424 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1425 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1426 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1428 if (breakpoint
->set
) {
1429 LOG_WARNING("breakpoint already set");
1432 /*check available context BRPs*/
1433 while ((brp_list
[brp_i
].used
||
1434 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
1437 if (brp_i
>= aarch64
->brp_num
) {
1438 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1442 breakpoint
->set
= brp_i
+ 1;
1443 control
= ((matchmode
& 0x7) << 20)
1445 | (byte_addr_select
<< 5)
1447 brp_list
[brp_i
].used
= 1;
1448 brp_list
[brp_i
].value
= (breakpoint
->asid
);
1449 brp_list
[brp_i
].control
= control
;
1450 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1451 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1452 brp_list
[brp_i
].value
);
1453 if (retval
!= ERROR_OK
)
1455 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1456 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1457 brp_list
[brp_i
].control
);
1458 if (retval
!= ERROR_OK
)
1460 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1461 brp_list
[brp_i
].control
,
1462 brp_list
[brp_i
].value
);
1467 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1469 int retval
= ERROR_FAIL
;
1470 int brp_1
= 0; /* holds the contextID pair */
1471 int brp_2
= 0; /* holds the IVA pair */
1472 uint32_t control_CTX
, control_IVA
;
1473 uint8_t CTX_byte_addr_select
= 0x0F;
1474 uint8_t IVA_byte_addr_select
= 0x0F;
1475 uint8_t CTX_machmode
= 0x03;
1476 uint8_t IVA_machmode
= 0x01;
1477 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1478 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1479 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1481 if (breakpoint
->set
) {
1482 LOG_WARNING("breakpoint already set");
1485 /*check available context BRPs*/
1486 while ((brp_list
[brp_1
].used
||
1487 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
1490 printf("brp(CTX) found num: %d\n", brp_1
);
1491 if (brp_1
>= aarch64
->brp_num
) {
1492 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1496 while ((brp_list
[brp_2
].used
||
1497 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
1500 printf("brp(IVA) found num: %d\n", brp_2
);
1501 if (brp_2
>= aarch64
->brp_num
) {
1502 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1506 breakpoint
->set
= brp_1
+ 1;
1507 breakpoint
->linked_BRP
= brp_2
;
1508 control_CTX
= ((CTX_machmode
& 0x7) << 20)
1511 | (CTX_byte_addr_select
<< 5)
1513 brp_list
[brp_1
].used
= 1;
1514 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1515 brp_list
[brp_1
].control
= control_CTX
;
1516 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1517 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1518 brp_list
[brp_1
].value
);
1519 if (retval
!= ERROR_OK
)
1521 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1522 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1523 brp_list
[brp_1
].control
);
1524 if (retval
!= ERROR_OK
)
1527 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1530 | (IVA_byte_addr_select
<< 5)
1532 brp_list
[brp_2
].used
= 1;
1533 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1534 brp_list
[brp_2
].control
= control_IVA
;
1535 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1536 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1537 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1538 if (retval
!= ERROR_OK
)
1540 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1541 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].BRPn
,
1542 brp_list
[brp_2
].value
>> 32);
1543 if (retval
!= ERROR_OK
)
1545 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1546 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1547 brp_list
[brp_2
].control
);
1548 if (retval
!= ERROR_OK
)
1554 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1557 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1558 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1559 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1561 if (!breakpoint
->set
) {
1562 LOG_WARNING("breakpoint not set");
1566 if (breakpoint
->type
== BKPT_HARD
) {
1567 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1568 int brp_i
= breakpoint
->set
- 1;
1569 int brp_j
= breakpoint
->linked_BRP
;
1570 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1571 LOG_DEBUG("Invalid BRP number in breakpoint");
1574 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1575 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1576 brp_list
[brp_i
].used
= 0;
1577 brp_list
[brp_i
].value
= 0;
1578 brp_list
[brp_i
].control
= 0;
1579 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1580 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1581 brp_list
[brp_i
].control
);
1582 if (retval
!= ERROR_OK
)
1584 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1585 LOG_DEBUG("Invalid BRP number in breakpoint");
1588 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1589 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1590 brp_list
[brp_j
].used
= 0;
1591 brp_list
[brp_j
].value
= 0;
1592 brp_list
[brp_j
].control
= 0;
1593 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1594 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1595 brp_list
[brp_j
].control
);
1596 if (retval
!= ERROR_OK
)
1598 breakpoint
->linked_BRP
= 0;
1599 breakpoint
->set
= 0;
1603 int brp_i
= breakpoint
->set
- 1;
1604 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1605 LOG_DEBUG("Invalid BRP number in breakpoint");
1608 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1609 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1610 brp_list
[brp_i
].used
= 0;
1611 brp_list
[brp_i
].value
= 0;
1612 brp_list
[brp_i
].control
= 0;
1613 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1614 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1615 brp_list
[brp_i
].control
);
1616 if (retval
!= ERROR_OK
)
1618 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1619 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1620 brp_list
[brp_i
].value
);
1621 if (retval
!= ERROR_OK
)
1623 breakpoint
->set
= 0;
1627 /* restore original instruction (kept in target endianness) */
1628 if (breakpoint
->length
== 4) {
1629 retval
= target_write_memory(target
,
1630 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1631 4, 1, breakpoint
->orig_instr
);
1632 if (retval
!= ERROR_OK
)
1635 retval
= target_write_memory(target
,
1636 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1637 2, 1, breakpoint
->orig_instr
);
1638 if (retval
!= ERROR_OK
)
1642 breakpoint
->set
= 0;
1647 static int aarch64_add_breakpoint(struct target
*target
,
1648 struct breakpoint
*breakpoint
)
1650 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1652 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1653 LOG_INFO("no hardware breakpoint available");
1654 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1657 if (breakpoint
->type
== BKPT_HARD
)
1658 aarch64
->brp_num_available
--;
1660 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1663 static int aarch64_add_context_breakpoint(struct target
*target
,
1664 struct breakpoint
*breakpoint
)
1666 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1668 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1669 LOG_INFO("no hardware breakpoint available");
1670 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1673 if (breakpoint
->type
== BKPT_HARD
)
1674 aarch64
->brp_num_available
--;
1676 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1679 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1680 struct breakpoint
*breakpoint
)
1682 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1684 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1685 LOG_INFO("no hardware breakpoint available");
1686 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1689 if (breakpoint
->type
== BKPT_HARD
)
1690 aarch64
->brp_num_available
--;
1692 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1696 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1698 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1701 /* It is perfectly possible to remove breakpoints while the target is running */
1702 if (target
->state
!= TARGET_HALTED
) {
1703 LOG_WARNING("target not halted");
1704 return ERROR_TARGET_NOT_HALTED
;
1708 if (breakpoint
->set
) {
1709 aarch64_unset_breakpoint(target
, breakpoint
);
1710 if (breakpoint
->type
== BKPT_HARD
)
1711 aarch64
->brp_num_available
++;
 * AArch64 reset functions
1721 static int aarch64_assert_reset(struct target
*target
)
1723 struct armv8_common
*armv8
= target_to_armv8(target
);
1727 /* FIXME when halt is requested, make it work somehow... */
1729 /* Issue some kind of warm reset. */
1730 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1731 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1732 else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1733 /* REVISIT handle "pulls" cases, if there's
1734 * hardware that needs them to work.
1736 jtag_add_reset(0, 1);
1738 LOG_ERROR("%s: how to reset?", target_name(target
));
1742 /* registers are now invalid */
1743 register_cache_invalidate(armv8
->arm
.core_cache
);
1745 target
->state
= TARGET_RESET
;
1750 static int aarch64_deassert_reset(struct target
*target
)
1756 /* be certain SRST is off */
1757 jtag_add_reset(0, 0);
1759 retval
= aarch64_poll(target
);
1760 if (retval
!= ERROR_OK
)
1763 if (target
->reset_halt
) {
1764 if (target
->state
!= TARGET_HALTED
) {
1765 LOG_WARNING("%s: ran after reset and before halt ...",
1766 target_name(target
));
1767 retval
= target_halt(target
);
1768 if (retval
!= ERROR_OK
)
1776 static int aarch64_write_apb_ap_memory(struct target
*target
,
1777 uint64_t address
, uint32_t size
,
1778 uint32_t count
, const uint8_t *buffer
)
1780 /* write memory through APB-AP */
1781 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1782 struct armv8_common
*armv8
= target_to_armv8(target
);
1783 struct arm
*arm
= &armv8
->arm
;
1784 int total_bytes
= count
* size
;
1786 int start_byte
= address
& 0x3;
1787 int end_byte
= (address
+ total_bytes
) & 0x3;
1790 uint8_t *tmp_buff
= NULL
;
1792 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64
" size %" PRIu32
" count%" PRIu32
,
1793 address
, size
, count
);
1794 if (target
->state
!= TARGET_HALTED
) {
1795 LOG_WARNING("target not halted");
1796 return ERROR_TARGET_NOT_HALTED
;
1799 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1801 /* Mark register R0 as dirty, as it will be used
1802 * for transferring the data.
1803 * It will be restored automatically when exiting
1806 reg
= armv8_reg_current(arm
, 1);
1809 reg
= armv8_reg_current(arm
, 0);
1812 /* clear any abort */
1813 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1814 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
1815 if (retval
!= ERROR_OK
)
1819 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1821 /* The algorithm only copies 32 bit words, so the buffer
1822 * should be expanded to include the words at either end.
1823 * The first and last words will be read first to avoid
1824 * corruption if needed.
1826 tmp_buff
= malloc(total_u32
* 4);
1828 if ((start_byte
!= 0) && (total_u32
> 1)) {
1829 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1830 * the other bytes in the word.
1832 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3), 4, 1, tmp_buff
);
1833 if (retval
!= ERROR_OK
)
1834 goto error_free_buff_w
;
1837 /* If end of write is not aligned, or the write is less than 4 bytes */
1838 if ((end_byte
!= 0) ||
1839 ((total_u32
== 1) && (total_bytes
!= 4))) {
1841 /* Read the last word to avoid corruption during 32 bit write */
1842 int mem_offset
= (total_u32
-1) * 4;
1843 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3) + mem_offset
, 4, 1, &tmp_buff
[mem_offset
]);
1844 if (retval
!= ERROR_OK
)
1845 goto error_free_buff_w
;
1848 /* Copy the write buffer over the top of the temporary buffer */
1849 memcpy(&tmp_buff
[start_byte
], buffer
, total_bytes
);
1851 /* We now have a 32 bit aligned buffer that can be written */
1854 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1855 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1856 if (retval
!= ERROR_OK
)
1857 goto error_free_buff_w
;
1859 /* Set Normal access mode */
1860 dscr
= (dscr
& ~DSCR_MA
);
1861 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1862 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1864 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1865 /* Write X0 with value 'address' using write procedure */
1866 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1867 retval
+= aarch64_write_dcc_64(armv8
, address
& ~0x3ULL
);
1868 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1869 retval
+= aarch64_exec_opcode(target
,
1870 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), &dscr
);
1872 /* Write R0 with value 'address' using write procedure */
1873 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1874 retval
+= aarch64_write_dcc(armv8
, address
& ~0x3ULL
);
1875 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1876 retval
+= aarch64_exec_opcode(target
,
1877 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr
);
1880 /* Step 1.d - Change DCC to memory mode */
1881 dscr
= dscr
| DSCR_MA
;
1882 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1883 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1884 if (retval
!= ERROR_OK
)
1885 goto error_unset_dtr_w
;
1888 /* Step 2.a - Do the write */
1889 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
1890 tmp_buff
, 4, total_u32
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
1891 if (retval
!= ERROR_OK
)
1892 goto error_unset_dtr_w
;
1894 /* Step 3.a - Switch DTR mode back to Normal mode */
1895 dscr
= (dscr
& ~DSCR_MA
);
1896 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1897 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1898 if (retval
!= ERROR_OK
)
1899 goto error_unset_dtr_w
;
1901 /* Check for sticky abort flags in the DSCR */
1902 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1903 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1904 if (retval
!= ERROR_OK
)
1905 goto error_free_buff_w
;
1906 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1907 /* Abort occurred - clear it and exit */
1908 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1909 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1910 armv8
->debug_base
+ CPUV8_DBG_DRCR
, 1<<2);
1911 goto error_free_buff_w
;
1919 /* Unset DTR mode */
1920 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1921 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1922 dscr
= (dscr
& ~DSCR_MA
);
1923 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1924 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1931 static int aarch64_read_apb_ap_memory(struct target
*target
,
1932 target_addr_t address
, uint32_t size
,
1933 uint32_t count
, uint8_t *buffer
)
1935 /* read memory through APB-AP */
1936 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1937 struct armv8_common
*armv8
= target_to_armv8(target
);
1938 struct arm
*arm
= &armv8
->arm
;
1939 int total_bytes
= count
* size
;
1941 int start_byte
= address
& 0x3;
1942 int end_byte
= (address
+ total_bytes
) & 0x3;
1945 uint8_t *tmp_buff
= NULL
;
1949 LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR
" size %" PRIu32
" count%" PRIu32
,
1950 address
, size
, count
);
1951 if (target
->state
!= TARGET_HALTED
) {
1952 LOG_WARNING("target not halted");
1953 return ERROR_TARGET_NOT_HALTED
;
1956 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1957 /* Mark register X0, X1 as dirty, as it will be used
1958 * for transferring the data.
1959 * It will be restored automatically when exiting
1962 reg
= armv8_reg_current(arm
, 1);
1965 reg
= armv8_reg_current(arm
, 0);
1968 /* clear any abort */
1969 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1970 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
1971 if (retval
!= ERROR_OK
)
1972 goto error_free_buff_r
;
1975 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1976 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1978 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1980 /* Set Normal access mode */
1981 dscr
= (dscr
& ~DSCR_MA
);
1982 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1983 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1985 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1986 /* Write X0 with value 'address' using write procedure */
1987 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1988 retval
+= aarch64_write_dcc_64(armv8
, address
& ~0x3ULL
);
1989 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1990 retval
+= aarch64_exec_opcode(target
, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), &dscr
);
1991 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1992 retval
+= aarch64_exec_opcode(target
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0), &dscr
);
1993 /* Step 1.e - Change DCC to memory mode */
1994 dscr
= dscr
| DSCR_MA
;
1995 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1996 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1997 /* Step 1.f - read DBGDTRTX and discard the value */
1998 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1999 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2001 /* Write R0 with value 'address' using write procedure */
2002 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2003 retval
+= aarch64_write_dcc(armv8
, address
& ~0x3ULL
);
2004 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2005 retval
+= aarch64_exec_opcode(target
,
2006 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr
);
2007 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2008 retval
+= aarch64_exec_opcode(target
,
2009 T32_FMTITR(ARMV4_5_MCR(14, 0, 0, 0, 5, 0)), &dscr
);
2010 /* Step 1.e - Change DCC to memory mode */
2011 dscr
= dscr
| DSCR_MA
;
2012 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2013 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2014 /* Step 1.f - read DBGDTRTX and discard the value */
2015 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2016 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2019 if (retval
!= ERROR_OK
)
2020 goto error_unset_dtr_r
;
2022 /* Optimize the read as much as we can, either way we read in a single pass */
2023 if ((start_byte
) || (end_byte
)) {
2024 /* The algorithm only copies 32 bit words, so the buffer
2025 * should be expanded to include the words at either end.
2026 * The first and last words will be read into a temp buffer
2027 * to avoid corruption
2029 tmp_buff
= malloc(total_u32
* 4);
2031 goto error_unset_dtr_r
;
2033 /* use the tmp buffer to read the entire data */
2034 u8buf_ptr
= tmp_buff
;
2036 /* address and read length are aligned so read directly into the passed buffer */
2039 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
2040 * Abort flags are sticky, so can be read at end of transactions
2042 * This data is read in aligned to 32 bit boundary.
2045 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
2046 * increments X0 by 4. */
2047 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, u8buf_ptr
, 4, total_u32
-1,
2048 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
2049 if (retval
!= ERROR_OK
)
2050 goto error_unset_dtr_r
;
2052 /* Step 3.a - set DTR access mode back to Normal mode */
2053 dscr
= (dscr
& ~DSCR_MA
);
2054 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2055 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2056 if (retval
!= ERROR_OK
)
2057 goto error_free_buff_r
;
2059 /* Step 3.b - read DBGDTRTX for the final value */
2060 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2061 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2062 memcpy(u8buf_ptr
+ (total_u32
-1) * 4, &value
, 4);
2064 /* Check for sticky abort flags in the DSCR */
2065 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2066 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2067 if (retval
!= ERROR_OK
)
2068 goto error_free_buff_r
;
2069 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
2070 /* Abort occurred - clear it and exit */
2071 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
2072 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2073 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
2074 goto error_free_buff_r
;
2077 /* check if we need to copy aligned data by applying any shift necessary */
2079 memcpy(buffer
, tmp_buff
+ start_byte
, total_bytes
);
2087 /* Unset DTR mode */
2088 mem_ap_read_atomic_u32(armv8
->debug_ap
,
2089 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2090 dscr
= (dscr
& ~DSCR_MA
);
2091 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2092 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2099 static int aarch64_read_phys_memory(struct target
*target
,
2100 target_addr_t address
, uint32_t size
,
2101 uint32_t count
, uint8_t *buffer
)
2103 struct armv8_common
*armv8
= target_to_armv8(target
);
2104 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2105 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2106 uint8_t apsel
= swjdp
->apsel
;
2107 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
,
2108 address
, size
, count
);
2110 if (count
&& buffer
) {
2112 if (armv8
->memory_ap_available
&& (apsel
== armv8
->memory_ap
->ap_num
)) {
2114 /* read memory through AHB-AP */
2115 retval
= mem_ap_read_buf(armv8
->memory_ap
, buffer
, size
, count
, address
);
2117 /* read memory through APB-AP */
2118 retval
= aarch64_mmu_modify(target
, 0);
2119 if (retval
!= ERROR_OK
)
2121 retval
= aarch64_read_apb_ap_memory(target
, address
, size
, count
, buffer
);
2127 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
2128 uint32_t size
, uint32_t count
, uint8_t *buffer
)
2130 int mmu_enabled
= 0;
2131 target_addr_t virt
, phys
;
2133 struct armv8_common
*armv8
= target_to_armv8(target
);
2134 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2135 uint8_t apsel
= swjdp
->apsel
;
2137 /* aarch64 handles unaligned memory access */
2138 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
, address
,
2141 /* determine if MMU was enabled on target stop */
2142 if (!armv8
->is_armv7r
) {
2143 retval
= aarch64_mmu(target
, &mmu_enabled
);
2144 if (retval
!= ERROR_OK
)
2148 if (armv8
->memory_ap_available
&& (apsel
== armv8
->memory_ap
->ap_num
)) {
2151 retval
= aarch64_virt2phys(target
, virt
, &phys
);
2152 if (retval
!= ERROR_OK
)
2155 LOG_DEBUG("Reading at virtual address. Translating v:0x%" TARGET_PRIxADDR
" to r:0x%" TARGET_PRIxADDR
,
2159 retval
= aarch64_read_phys_memory(target
, address
, size
, count
,
2163 retval
= aarch64_check_address(target
, address
);
2164 if (retval
!= ERROR_OK
)
2166 /* enable MMU as we could have disabled it for phys
2168 retval
= aarch64_mmu_modify(target
, 1);
2169 if (retval
!= ERROR_OK
)
2172 retval
= aarch64_read_apb_ap_memory(target
, address
, size
,
2178 static int aarch64_write_phys_memory(struct target
*target
,
2179 target_addr_t address
, uint32_t size
,
2180 uint32_t count
, const uint8_t *buffer
)
2182 struct armv8_common
*armv8
= target_to_armv8(target
);
2183 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2184 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2185 uint8_t apsel
= swjdp
->apsel
;
2187 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
, address
,
2190 if (count
&& buffer
) {
2192 if (armv8
->memory_ap_available
&& (apsel
== armv8
->memory_ap
->ap_num
)) {
2194 /* write memory through AHB-AP */
2195 retval
= mem_ap_write_buf(armv8
->memory_ap
, buffer
, size
, count
, address
);
2198 /* write memory through APB-AP */
2199 if (!armv8
->is_armv7r
) {
2200 retval
= aarch64_mmu_modify(target
, 0);
2201 if (retval
!= ERROR_OK
)
2204 return aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
2209 /* REVISIT this op is generic ARMv7-A/R stuff */
2210 if (retval
== ERROR_OK
&& target
->state
== TARGET_HALTED
) {
2211 struct arm_dpm
*dpm
= armv8
->arm
.dpm
;
2213 retval
= dpm
->prepare(dpm
);
2214 if (retval
!= ERROR_OK
)
2217 /* The Cache handling will NOT work with MMU active, the
2218 * wrong addresses will be invalidated!
2220 * For both ICache and DCache, walk all cache lines in the
2221 * address range. Cortex-A8 has fixed 64 byte line length.
2223 * REVISIT per ARMv7, these may trigger watchpoints ...
2226 /* invalidate I-Cache */
2227 if (armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
) {
2228 /* ICIMVAU - Invalidate Cache single entry
2230 * MCR p15, 0, r0, c7, c5, 1
2232 for (uint32_t cacheline
= address
;
2233 cacheline
< address
+ size
* count
;
2235 retval
= dpm
->instr_write_data_r0(dpm
,
2236 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2238 if (retval
!= ERROR_OK
)
2243 /* invalidate D-Cache */
2244 if (armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
) {
2245 /* DCIMVAC - Invalidate data Cache line
2247 * MCR p15, 0, r0, c7, c6, 1
2249 for (uint32_t cacheline
= address
;
2250 cacheline
< address
+ size
* count
;
2252 retval
= dpm
->instr_write_data_r0(dpm
,
2253 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2255 if (retval
!= ERROR_OK
)
2260 /* (void) */ dpm
->finish(dpm
);
2266 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
2267 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
2269 int mmu_enabled
= 0;
2270 target_addr_t virt
, phys
;
2272 struct armv8_common
*armv8
= target_to_armv8(target
);
2273 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2274 uint8_t apsel
= swjdp
->apsel
;
2276 /* aarch64 handles unaligned memory access */
2277 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR
"; size %" PRId32
2278 "; count %" PRId32
, address
, size
, count
);
2280 /* determine if MMU was enabled on target stop */
2281 if (!armv8
->is_armv7r
) {
2282 retval
= aarch64_mmu(target
, &mmu_enabled
);
2283 if (retval
!= ERROR_OK
)
2287 if (armv8
->memory_ap_available
&& (apsel
== armv8
->memory_ap
->ap_num
)) {
2288 LOG_DEBUG("Writing memory to address 0x%" TARGET_PRIxADDR
"; size %"
2289 PRId32
"; count %" PRId32
, address
, size
, count
);
2292 retval
= aarch64_virt2phys(target
, virt
, &phys
);
2293 if (retval
!= ERROR_OK
)
2296 LOG_DEBUG("Writing to virtual address. Translating v:0x%"
2297 TARGET_PRIxADDR
" to r:0x%" TARGET_PRIxADDR
, virt
, phys
);
2300 retval
= aarch64_write_phys_memory(target
, address
, size
,
2304 retval
= aarch64_check_address(target
, address
);
2305 if (retval
!= ERROR_OK
)
2307 /* enable MMU as we could have disabled it for phys access */
2308 retval
= aarch64_mmu_modify(target
, 1);
2309 if (retval
!= ERROR_OK
)
2312 retval
= aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
2317 static int aarch64_handle_target_request(void *priv
)
2319 struct target
*target
= priv
;
2320 struct armv8_common
*armv8
= target_to_armv8(target
);
2323 if (!target_was_examined(target
))
2325 if (!target
->dbg_msg_enabled
)
2328 if (target
->state
== TARGET_RUNNING
) {
2331 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2332 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2334 /* check if we have data */
2335 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
2336 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2337 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
2338 if (retval
== ERROR_OK
) {
2339 target_request(target
, request
);
2340 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2341 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2349 static int aarch64_examine_first(struct target
*target
)
2351 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2352 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2353 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2354 int retval
= ERROR_OK
;
2355 uint32_t pfr
, debug
, ctypr
, ttypr
, cpuid
;
2358 /* We do one extra read to ensure DAP is configured,
2359 * we call ahbap_debugport_init(swjdp) instead
2361 retval
= dap_dp_init(swjdp
);
2362 if (retval
!= ERROR_OK
)
2365 /* Search for the APB-AB - it is needed for access to debug registers */
2366 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
2367 if (retval
!= ERROR_OK
) {
2368 LOG_ERROR("Could not find APB-AP for debug access");
2372 retval
= mem_ap_init(armv8
->debug_ap
);
2373 if (retval
!= ERROR_OK
) {
2374 LOG_ERROR("Could not initialize the APB-AP");
2378 armv8
->debug_ap
->memaccess_tck
= 80;
2380 /* Search for the AHB-AB */
2381 armv8
->memory_ap_available
= false;
2382 retval
= dap_find_ap(swjdp
, AP_TYPE_AHB_AP
, &armv8
->memory_ap
);
2383 if (retval
== ERROR_OK
) {
2384 retval
= mem_ap_init(armv8
->memory_ap
);
2385 if (retval
== ERROR_OK
)
2386 armv8
->memory_ap_available
= true;
2388 if (retval
!= ERROR_OK
) {
2389 /* AHB-AP not found or unavailable - use the CPU */
2390 LOG_DEBUG("No AHB-AP available for memory access");
2394 if (!target
->dbgbase_set
) {
2396 /* Get ROM Table base */
2398 int32_t coreidx
= target
->coreid
;
2399 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
2400 if (retval
!= ERROR_OK
)
2402 /* Lookup 0x15 -- Processor DAP */
2403 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
2404 &armv8
->debug_base
, &coreidx
);
2405 if (retval
!= ERROR_OK
)
2407 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
,
2408 coreidx
, armv8
->debug_base
);
2410 armv8
->debug_base
= target
->dbgbase
;
2412 LOG_DEBUG("Target ctibase is 0x%x", target
->ctibase
);
2413 if (target
->ctibase
== 0)
2414 armv8
->cti_base
= target
->ctibase
= armv8
->debug_base
+ 0x1000;
2416 armv8
->cti_base
= target
->ctibase
;
2418 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2419 armv8
->debug_base
+ CPUV8_DBG_LOCKACCESS
, 0xC5ACCE55);
2420 if (retval
!= ERROR_OK
) {
2421 LOG_DEBUG("Examine %s failed", "oslock");
2425 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2426 armv8
->debug_base
+ 0x88, &cpuid
);
2427 LOG_DEBUG("0x88 = %x", cpuid
);
2429 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2430 armv8
->debug_base
+ 0x314, &cpuid
);
2431 LOG_DEBUG("0x314 = %x", cpuid
);
2433 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2434 armv8
->debug_base
+ 0x310, &cpuid
);
2435 LOG_DEBUG("0x310 = %x", cpuid
);
2436 if (retval
!= ERROR_OK
)
2439 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2440 armv8
->debug_base
+ CPUDBG_CPUID
, &cpuid
);
2441 if (retval
!= ERROR_OK
) {
2442 LOG_DEBUG("Examine %s failed", "CPUID");
2446 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2447 armv8
->debug_base
+ CPUDBG_CTYPR
, &ctypr
);
2448 if (retval
!= ERROR_OK
) {
2449 LOG_DEBUG("Examine %s failed", "CTYPR");
2453 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2454 armv8
->debug_base
+ CPUDBG_TTYPR
, &ttypr
);
2455 if (retval
!= ERROR_OK
) {
2456 LOG_DEBUG("Examine %s failed", "TTYPR");
2460 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2461 armv8
->debug_base
+ ID_AA64PFR0_EL1
, &pfr
);
2462 if (retval
!= ERROR_OK
) {
2463 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2466 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2467 armv8
->debug_base
+ ID_AA64DFR0_EL1
, &debug
);
2468 if (retval
!= ERROR_OK
) {
2469 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2473 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
2474 LOG_DEBUG("ctypr = 0x%08" PRIx32
, ctypr
);
2475 LOG_DEBUG("ttypr = 0x%08" PRIx32
, ttypr
);
2476 LOG_DEBUG("ID_AA64PFR0_EL1 = 0x%08" PRIx32
, pfr
);
2477 LOG_DEBUG("ID_AA64DFR0_EL1 = 0x%08" PRIx32
, debug
);
2479 armv8
->arm
.core_type
= ARM_MODE_MON
;
2480 armv8
->arm
.core_state
= ARM_STATE_AARCH64
;
2481 retval
= aarch64_dpm_setup(aarch64
, debug
);
2482 if (retval
!= ERROR_OK
)
2485 /* Setup Breakpoint Register Pairs */
2486 aarch64
->brp_num
= ((debug
>> 12) & 0x0F) + 1;
2487 aarch64
->brp_num_context
= ((debug
>> 28) & 0x0F) + 1;
2489 /* hack - no context bpt support yet */
2490 aarch64
->brp_num_context
= 0;
2492 aarch64
->brp_num_available
= aarch64
->brp_num
;
2493 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
2494 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
2495 aarch64
->brp_list
[i
].used
= 0;
2496 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
2497 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
2499 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
2500 aarch64
->brp_list
[i
].value
= 0;
2501 aarch64
->brp_list
[i
].control
= 0;
2502 aarch64
->brp_list
[i
].BRPn
= i
;
2505 LOG_DEBUG("Configured %i hw breakpoints", aarch64
->brp_num
);
2507 target_set_examined(target
);
2511 static int aarch64_examine(struct target
*target
)
2513 int retval
= ERROR_OK
;
2515 /* don't re-probe hardware after each reset */
2516 if (!target_was_examined(target
))
2517 retval
= aarch64_examine_first(target
);
2519 /* Configure core debug access */
2520 if (retval
== ERROR_OK
)
2521 retval
= aarch64_init_debug_access(target
);
2527 * Cortex-A8 target creation and initialization
2530 static int aarch64_init_target(struct command_context
*cmd_ctx
,
2531 struct target
*target
)
2533 /* examine_first() does a bunch of this */
2537 static int aarch64_init_arch_info(struct target
*target
,
2538 struct aarch64_common
*aarch64
, struct jtag_tap
*tap
)
2540 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2541 struct adiv5_dap
*dap
= armv8
->arm
.dap
;
2543 armv8
->arm
.dap
= dap
;
2545 /* Setup struct aarch64_common */
2546 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
2547 /* tap has no dap initialized */
2549 tap
->dap
= dap_init();
2551 /* Leave (only) generic DAP stuff for debugport_init() */
2552 tap
->dap
->tap
= tap
;
2555 armv8
->arm
.dap
= tap
->dap
;
2557 aarch64
->fast_reg_read
= 0;
2559 /* register arch-specific functions */
2560 armv8
->examine_debug_reason
= NULL
;
2562 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
2564 armv8
->pre_restore_context
= NULL
;
2566 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
2568 /* REVISIT v7a setup should be in a v7a-specific routine */
2569 armv8_init_arch_info(target
, armv8
);
2570 target_register_timer_callback(aarch64_handle_target_request
, 1, 1, target
);
2575 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
2577 struct aarch64_common
*aarch64
= calloc(1, sizeof(struct aarch64_common
));
2579 aarch64
->armv8_common
.is_armv7r
= false;
2581 return aarch64_init_arch_info(target
, aarch64
, target
->tap
);
2584 static int aarch64_mmu(struct target
*target
, int *enabled
)
2586 if (target
->state
!= TARGET_HALTED
) {
2587 LOG_ERROR("%s: target not halted", __func__
);
2588 return ERROR_TARGET_INVALID
;
2591 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
2595 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
2596 target_addr_t
*phys
)
2598 int retval
= ERROR_FAIL
;
2599 struct armv8_common
*armv8
= target_to_armv8(target
);
2600 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2601 uint8_t apsel
= swjdp
->apsel
;
2602 if (armv8
->memory_ap_available
&& (apsel
== armv8
->memory_ap
->ap_num
)) {
2604 retval
= armv8_mmu_translate_va(target
,
2606 if (retval
!= ERROR_OK
)
2609 } else {/* use this method if armv8->memory_ap not selected
2610 * mmu must be enable in order to get a correct translation */
2611 retval
= aarch64_mmu_modify(target
, 1);
2612 if (retval
!= ERROR_OK
)
2614 retval
= armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
2620 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
2622 struct target
*target
= get_current_target(CMD_CTX
);
2623 struct armv8_common
*armv8
= target_to_armv8(target
);
2625 return armv8_handle_cache_info_command(CMD_CTX
,
2626 &armv8
->armv8_mmu
.armv8_cache
);
2630 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
2632 struct target
*target
= get_current_target(CMD_CTX
);
2633 if (!target_was_examined(target
)) {
2634 LOG_ERROR("target not examined yet");
2638 return aarch64_init_debug_access(target
);
2640 COMMAND_HANDLER(aarch64_handle_smp_off_command
)
2642 struct target
*target
= get_current_target(CMD_CTX
);
2643 /* check target is an smp target */
2644 struct target_list
*head
;
2645 struct target
*curr
;
2646 head
= target
->head
;
2648 if (head
!= (struct target_list
*)NULL
) {
2649 while (head
!= (struct target_list
*)NULL
) {
2650 curr
= head
->target
;
2654 /* fixes the target display to the debugger */
2655 target
->gdb_service
->target
= target
;
2660 COMMAND_HANDLER(aarch64_handle_smp_on_command
)
2662 struct target
*target
= get_current_target(CMD_CTX
);
2663 struct target_list
*head
;
2664 struct target
*curr
;
2665 head
= target
->head
;
2666 if (head
!= (struct target_list
*)NULL
) {
2668 while (head
!= (struct target_list
*)NULL
) {
2669 curr
= head
->target
;
2677 COMMAND_HANDLER(aarch64_handle_smp_gdb_command
)
2679 struct target
*target
= get_current_target(CMD_CTX
);
2680 int retval
= ERROR_OK
;
2681 struct target_list
*head
;
2682 head
= target
->head
;
2683 if (head
!= (struct target_list
*)NULL
) {
2684 if (CMD_ARGC
== 1) {
2686 COMMAND_PARSE_NUMBER(int, CMD_ARGV
[0], coreid
);
2687 if (ERROR_OK
!= retval
)
2689 target
->gdb_service
->core
[1] = coreid
;
2692 command_print(CMD_CTX
, "gdb coreid %" PRId32
" -> %" PRId32
, target
->gdb_service
->core
[0]
2693 , target
->gdb_service
->core
[1]);
2698 static const struct command_registration aarch64_exec_command_handlers
[] = {
2700 .name
= "cache_info",
2701 .handler
= aarch64_handle_cache_info_command
,
2702 .mode
= COMMAND_EXEC
,
2703 .help
= "display information about target caches",
2708 .handler
= aarch64_handle_dbginit_command
,
2709 .mode
= COMMAND_EXEC
,
2710 .help
= "Initialize core debug",
2713 { .name
= "smp_off",
2714 .handler
= aarch64_handle_smp_off_command
,
2715 .mode
= COMMAND_EXEC
,
2716 .help
= "Stop smp handling",
2721 .handler
= aarch64_handle_smp_on_command
,
2722 .mode
= COMMAND_EXEC
,
2723 .help
= "Restart smp handling",
2728 .handler
= aarch64_handle_smp_gdb_command
,
2729 .mode
= COMMAND_EXEC
,
2730 .help
= "display/fix current core played to gdb",
2735 COMMAND_REGISTRATION_DONE
2737 static const struct command_registration aarch64_command_handlers
[] = {
2739 .chain
= arm_command_handlers
,
2742 .chain
= armv8_command_handlers
,
2746 .mode
= COMMAND_ANY
,
2747 .help
= "Cortex-A command group",
2749 .chain
= aarch64_exec_command_handlers
,
2751 COMMAND_REGISTRATION_DONE
2754 struct target_type aarch64_target
= {
2757 .poll
= aarch64_poll
,
2758 .arch_state
= armv8_arch_state
,
2760 .halt
= aarch64_halt
,
2761 .resume
= aarch64_resume
,
2762 .step
= aarch64_step
,
2764 .assert_reset
= aarch64_assert_reset
,
2765 .deassert_reset
= aarch64_deassert_reset
,
2767 /* REVISIT allow exporting VFP3 registers ... */
2768 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
2770 .read_memory
= aarch64_read_memory
,
2771 .write_memory
= aarch64_write_memory
,
2773 .checksum_memory
= arm_checksum_memory
,
2774 .blank_check_memory
= arm_blank_check_memory
,
2776 .run_algorithm
= armv4_5_run_algorithm
,
2778 .add_breakpoint
= aarch64_add_breakpoint
,
2779 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
2780 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
2781 .remove_breakpoint
= aarch64_remove_breakpoint
,
2782 .add_watchpoint
= NULL
,
2783 .remove_watchpoint
= NULL
,
2785 .commands
= aarch64_command_handlers
,
2786 .target_create
= aarch64_target_create
,
2787 .init_target
= aarch64_init_target
,
2788 .examine
= aarch64_examine
,
2790 .read_phys_memory
= aarch64_read_phys_memory
,
2791 .write_phys_memory
= aarch64_write_phys_memory
,
2793 .virt2phys
= aarch64_virt2phys
,
Linking to existing account procedure
If you already have an account and want to add another login method
you
MUST first sign in with your existing account and
then change URL to read
https://review.openocd.org/login/?link
to get to this page again but this time it'll work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)