1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
33 static int aarch64_poll(struct target
*target
);
34 static int aarch64_debug_entry(struct target
*target
);
35 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
36 static int aarch64_set_breakpoint(struct target
*target
,
37 struct breakpoint
*breakpoint
, uint8_t matchmode
);
38 static int aarch64_set_context_breakpoint(struct target
*target
,
39 struct breakpoint
*breakpoint
, uint8_t matchmode
);
40 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
41 struct breakpoint
*breakpoint
);
42 static int aarch64_unset_breakpoint(struct target
*target
,
43 struct breakpoint
*breakpoint
);
44 static int aarch64_mmu(struct target
*target
, int *enabled
);
45 static int aarch64_virt2phys(struct target
*target
,
46 target_addr_t virt
, target_addr_t
*phys
);
47 static int aarch64_read_apb_ap_memory(struct target
*target
,
48 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
49 static int aarch64_instr_write_data_r0(struct arm_dpm
*dpm
,
50 uint32_t opcode
, uint32_t data
);
52 static int aarch64_restore_system_control_reg(struct target
*target
)
54 int retval
= ERROR_OK
;
56 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
57 struct armv8_common
*armv8
= target_to_armv8(target
);
59 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
60 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
61 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
63 switch (armv8
->arm
.core_mode
) {
67 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
70 aarch64
->system_control_reg
);
71 if (retval
!= ERROR_OK
)
76 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
79 aarch64
->system_control_reg
);
80 if (retval
!= ERROR_OK
)
85 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
88 aarch64
->system_control_reg
);
89 if (retval
!= ERROR_OK
)
93 LOG_DEBUG("unknow cpu state 0x%x" PRIx32
, armv8
->arm
.core_state
);
/* NOTE(review): shredded extraction — the body of this function is missing
 * from this chunk entirely; only the signature survives. Do not infer its
 * behavior from the name; recover the body from upstream before editing. */
99 /* check address before aarch64_apb read write access with mmu on
100 * remove apb predictible data abort */
101 static int aarch64_check_address(struct target
*target
, uint32_t address
)
106 /* modify system_control_reg in order to enable or disable mmu for :
107 * - virt2phys address conversion
108 * - read or write memory in phys or virt address */
109 static int aarch64_mmu_modify(struct target
*target
, int enable
)
111 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
112 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
113 int retval
= ERROR_OK
;
116 /* if mmu enabled at target stop and mmu not enable */
117 if (!(aarch64
->system_control_reg
& 0x1U
)) {
118 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
121 if (!(aarch64
->system_control_reg_curr
& 0x1U
)) {
122 aarch64
->system_control_reg_curr
|= 0x1U
;
123 switch (armv8
->arm
.core_mode
) {
127 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
130 aarch64
->system_control_reg_curr
);
131 if (retval
!= ERROR_OK
)
136 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
139 aarch64
->system_control_reg_curr
);
140 if (retval
!= ERROR_OK
)
145 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
148 aarch64
->system_control_reg_curr
);
149 if (retval
!= ERROR_OK
)
153 LOG_DEBUG("unknow cpu state 0x%x" PRIx32
, armv8
->arm
.core_state
);
157 if (aarch64
->system_control_reg_curr
& 0x4U
) {
158 /* data cache is active */
159 aarch64
->system_control_reg_curr
&= ~0x4U
;
160 /* flush data cache armv7 function to be called */
161 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
162 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
164 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
165 aarch64
->system_control_reg_curr
&= ~0x1U
;
166 switch (armv8
->arm
.core_mode
) {
170 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
173 aarch64
->system_control_reg_curr
);
174 if (retval
!= ERROR_OK
)
179 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
182 aarch64
->system_control_reg_curr
);
183 if (retval
!= ERROR_OK
)
188 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
191 aarch64
->system_control_reg_curr
);
192 if (retval
!= ERROR_OK
)
196 LOG_DEBUG("unknow cpu state 0x%x" PRIx32
, armv8
->arm
.core_state
);
205 * Basic debug access, very low level assumes state is saved
207 static int aarch64_init_debug_access(struct target
*target
)
209 struct armv8_common
*armv8
= target_to_armv8(target
);
215 /* Clear Sticky Power Down status Bit in PRSR to enable access to
216 the registers in the Core Power Domain */
217 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
218 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
219 if (retval
!= ERROR_OK
)
223 * Static CTI configuration:
224 * Channel 0 -> trigger outputs HALT request to PE
225 * Channel 1 -> trigger outputs Resume request to PE
226 * Gate all channel trigger events from entering the CTM
230 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
231 armv8
->cti_base
+ CTI_CTR
, 1);
232 /* By default, gate all channel triggers to and from the CTM */
233 if (retval
== ERROR_OK
)
234 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
235 armv8
->cti_base
+ CTI_GATE
, 0);
236 /* output halt requests to PE on channel 0 trigger */
237 if (retval
== ERROR_OK
)
238 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
239 armv8
->cti_base
+ CTI_OUTEN0
, CTI_CHNL(0));
240 /* output restart requests to PE on channel 1 trigger */
241 if (retval
== ERROR_OK
)
242 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
243 armv8
->cti_base
+ CTI_OUTEN1
, CTI_CHNL(1));
244 if (retval
!= ERROR_OK
)
247 /* Resync breakpoint registers */
249 /* Since this is likely called from init or reset, update target state information*/
250 return aarch64_poll(target
);
253 /* To reduce needless round-trips, pass in a pointer to the current
254 * DSCR value. Initialize it to zero if you just need to know the
255 * value on return from this function; or DSCR_ITE if you
256 * happen to know that no instruction is pending.
258 static int aarch64_exec_opcode(struct target
*target
,
259 uint32_t opcode
, uint32_t *dscr_p
)
263 struct armv8_common
*armv8
= target_to_armv8(target
);
264 dscr
= dscr_p
? *dscr_p
: 0;
266 LOG_DEBUG("exec opcode 0x%08" PRIx32
, opcode
);
268 /* Wait for InstrCompl bit to be set */
269 long long then
= timeval_ms();
270 while ((dscr
& DSCR_ITE
) == 0) {
271 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
272 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
273 if (retval
!= ERROR_OK
) {
274 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32
, opcode
);
277 if (timeval_ms() > then
+ 1000) {
278 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
283 retval
= mem_ap_write_u32(armv8
->debug_ap
,
284 armv8
->debug_base
+ CPUV8_DBG_ITR
, opcode
);
285 if (retval
!= ERROR_OK
)
290 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
291 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
292 if (retval
!= ERROR_OK
) {
293 LOG_ERROR("Could not read DSCR register");
296 if (timeval_ms() > then
+ 1000) {
297 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
300 } while ((dscr
& DSCR_ITE
) == 0); /* Wait for InstrCompl bit to be set */
308 /* Write to memory mapped registers directly with no cache or mmu handling */
309 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
314 struct armv8_common
*armv8
= target_to_armv8(target
);
316 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
322 * AARCH64 implementation of Debug Programmer's Model
324 * NOTE the invariant: these routines return with DSCR_ITE set,
325 * so there's no need to poll for it before executing an instruction.
327 * NOTE that in several of these cases the "stall" mode might be useful.
328 * It'd let us queue a few operations together... prepare/finish might
329 * be the places to enable/disable that mode.
332 static inline struct aarch64_common
*dpm_to_a8(struct arm_dpm
*dpm
)
334 return container_of(dpm
, struct aarch64_common
, armv8_common
.dpm
);
337 static int aarch64_write_dcc(struct armv8_common
*armv8
, uint32_t data
)
339 LOG_DEBUG("write DCC 0x%08" PRIx32
, data
);
340 return mem_ap_write_u32(armv8
->debug_ap
,
341 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
344 static int aarch64_write_dcc_64(struct armv8_common
*armv8
, uint64_t data
)
347 LOG_DEBUG("write DCC Low word0x%08" PRIx32
, (unsigned)data
);
348 LOG_DEBUG("write DCC High word 0x%08" PRIx32
, (unsigned)(data
>> 32));
349 ret
= mem_ap_write_u32(armv8
->debug_ap
,
350 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
351 ret
+= mem_ap_write_u32(armv8
->debug_ap
,
352 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, data
>> 32);
356 static int aarch64_read_dcc(struct armv8_common
*armv8
, uint32_t *data
,
359 uint32_t dscr
= DSCR_ITE
;
365 /* Wait for DTRRXfull */
366 long long then
= timeval_ms();
367 while ((dscr
& DSCR_DTR_TX_FULL
) == 0) {
368 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
369 armv8
->debug_base
+ CPUV8_DBG_DSCR
,
371 if (retval
!= ERROR_OK
)
373 if (timeval_ms() > then
+ 1000) {
374 LOG_ERROR("Timeout waiting for read dcc");
379 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
380 armv8
->debug_base
+ CPUV8_DBG_DTRTX
,
382 if (retval
!= ERROR_OK
)
384 LOG_DEBUG("read DCC 0x%08" PRIx32
, *data
);
392 static int aarch64_read_dcc_64(struct armv8_common
*armv8
, uint64_t *data
,
395 uint32_t dscr
= DSCR_ITE
;
402 /* Wait for DTRRXfull */
403 long long then
= timeval_ms();
404 while ((dscr
& DSCR_DTR_TX_FULL
) == 0) {
405 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
406 armv8
->debug_base
+ CPUV8_DBG_DSCR
,
408 if (retval
!= ERROR_OK
)
410 if (timeval_ms() > then
+ 1000) {
411 LOG_ERROR("Timeout waiting for read dcc");
416 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
417 armv8
->debug_base
+ CPUV8_DBG_DTRTX
,
419 if (retval
!= ERROR_OK
)
422 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
423 armv8
->debug_base
+ CPUV8_DBG_DTRRX
,
425 if (retval
!= ERROR_OK
)
428 *data
= *(uint32_t *)data
| (uint64_t)higher
<< 32;
429 LOG_DEBUG("read DCC 0x%16.16" PRIx64
, *data
);
437 static int aarch64_dpm_prepare(struct arm_dpm
*dpm
)
439 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
443 /* set up invariant: INSTR_COMP is set after ever DPM operation */
444 long long then
= timeval_ms();
446 retval
= mem_ap_read_atomic_u32(a8
->armv8_common
.debug_ap
,
447 a8
->armv8_common
.debug_base
+ CPUV8_DBG_DSCR
,
449 if (retval
!= ERROR_OK
)
451 if ((dscr
& DSCR_ITE
) != 0)
453 if (timeval_ms() > then
+ 1000) {
454 LOG_ERROR("Timeout waiting for dpm prepare");
459 /* this "should never happen" ... */
460 if (dscr
& DSCR_DTR_RX_FULL
) {
461 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32
, dscr
);
463 retval
= mem_ap_read_u32(a8
->armv8_common
.debug_ap
,
464 a8
->armv8_common
.debug_base
+ CPUV8_DBG_DTRRX
, &dscr
);
465 if (retval
!= ERROR_OK
)
468 /* Clear sticky error */
469 retval
= mem_ap_write_u32(a8
->armv8_common
.debug_ap
,
470 a8
->armv8_common
.debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
471 if (retval
!= ERROR_OK
)
478 static int aarch64_dpm_finish(struct arm_dpm
*dpm
)
480 /* REVISIT what could be done here? */
484 static int aarch64_instr_execute(struct arm_dpm
*dpm
,
487 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
488 uint32_t dscr
= DSCR_ITE
;
490 return aarch64_exec_opcode(
491 a8
->armv8_common
.arm
.target
,
496 static int aarch64_instr_write_data_dcc(struct arm_dpm
*dpm
,
497 uint32_t opcode
, uint32_t data
)
499 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
501 uint32_t dscr
= DSCR_ITE
;
503 retval
= aarch64_write_dcc(&a8
->armv8_common
, data
);
504 if (retval
!= ERROR_OK
)
507 return aarch64_exec_opcode(
508 a8
->armv8_common
.arm
.target
,
513 static int aarch64_instr_write_data_dcc_64(struct arm_dpm
*dpm
,
514 uint32_t opcode
, uint64_t data
)
516 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
518 uint32_t dscr
= DSCR_ITE
;
520 retval
= aarch64_write_dcc_64(&a8
->armv8_common
, data
);
521 if (retval
!= ERROR_OK
)
524 return aarch64_exec_opcode(
525 a8
->armv8_common
.arm
.target
,
530 static int aarch64_instr_write_data_r0(struct arm_dpm
*dpm
,
531 uint32_t opcode
, uint32_t data
)
533 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
534 uint32_t dscr
= DSCR_ITE
;
537 retval
= aarch64_write_dcc(&a8
->armv8_common
, data
);
538 if (retval
!= ERROR_OK
)
541 retval
= aarch64_exec_opcode(
542 a8
->armv8_common
.arm
.target
,
543 ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0
, 0),
545 if (retval
!= ERROR_OK
)
548 /* then the opcode, taking data from R0 */
549 retval
= aarch64_exec_opcode(
550 a8
->armv8_common
.arm
.target
,
557 static int aarch64_instr_write_data_r0_64(struct arm_dpm
*dpm
,
558 uint32_t opcode
, uint64_t data
)
560 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
561 uint32_t dscr
= DSCR_ITE
;
564 retval
= aarch64_write_dcc_64(&a8
->armv8_common
, data
);
565 if (retval
!= ERROR_OK
)
568 retval
= aarch64_exec_opcode(
569 a8
->armv8_common
.arm
.target
,
570 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0),
572 if (retval
!= ERROR_OK
)
575 /* then the opcode, taking data from R0 */
576 retval
= aarch64_exec_opcode(
577 a8
->armv8_common
.arm
.target
,
/* NOTE(review): shredded extraction — the barrier opcode argument and the
 * closing lines of this function are missing from this chunk. The surviving
 * text shows it issues one opcode via aarch64_exec_opcode after a CPSR
 * change; recover the exact opcode from upstream before editing. */
584 static int aarch64_instr_cpsr_sync(struct arm_dpm
*dpm
)
586 struct target
*target
= dpm
->arm
->target
;
587 uint32_t dscr
= DSCR_ITE
;
589 /* "Prefetch flush" after modifying execution status in CPSR */
590 return aarch64_exec_opcode(target
,
595 static int aarch64_instr_read_data_dcc(struct arm_dpm
*dpm
,
596 uint32_t opcode
, uint32_t *data
)
598 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
600 uint32_t dscr
= DSCR_ITE
;
602 /* the opcode, writing data to DCC */
603 retval
= aarch64_exec_opcode(
604 a8
->armv8_common
.arm
.target
,
607 if (retval
!= ERROR_OK
)
610 return aarch64_read_dcc(&a8
->armv8_common
, data
, &dscr
);
613 static int aarch64_instr_read_data_dcc_64(struct arm_dpm
*dpm
,
614 uint32_t opcode
, uint64_t *data
)
616 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
618 uint32_t dscr
= DSCR_ITE
;
620 /* the opcode, writing data to DCC */
621 retval
= aarch64_exec_opcode(
622 a8
->armv8_common
.arm
.target
,
625 if (retval
!= ERROR_OK
)
628 return aarch64_read_dcc_64(&a8
->armv8_common
, data
, &dscr
);
631 static int aarch64_instr_read_data_r0(struct arm_dpm
*dpm
,
632 uint32_t opcode
, uint32_t *data
)
634 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
635 uint32_t dscr
= DSCR_ITE
;
638 /* the opcode, writing data to R0 */
639 retval
= aarch64_exec_opcode(
640 a8
->armv8_common
.arm
.target
,
643 if (retval
!= ERROR_OK
)
646 /* write R0 to DCC */
647 retval
= aarch64_exec_opcode(
648 a8
->armv8_common
.arm
.target
,
649 ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0
, 0), /* msr dbgdtr_el0, x0 */
651 if (retval
!= ERROR_OK
)
654 return aarch64_read_dcc(&a8
->armv8_common
, data
, &dscr
);
657 static int aarch64_instr_read_data_r0_64(struct arm_dpm
*dpm
,
658 uint32_t opcode
, uint64_t *data
)
660 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
661 uint32_t dscr
= DSCR_ITE
;
664 /* the opcode, writing data to R0 */
665 retval
= aarch64_exec_opcode(
666 a8
->armv8_common
.arm
.target
,
669 if (retval
!= ERROR_OK
)
672 /* write R0 to DCC */
673 retval
= aarch64_exec_opcode(
674 a8
->armv8_common
.arm
.target
,
675 ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0), /* msr dbgdtr_el0, x0 */
677 if (retval
!= ERROR_OK
)
680 return aarch64_read_dcc_64(&a8
->armv8_common
, data
, &dscr
);
683 static int aarch64_bpwp_enable(struct arm_dpm
*dpm
, unsigned index_t
,
684 uint32_t addr
, uint32_t control
)
686 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
687 uint32_t vr
= a8
->armv8_common
.debug_base
;
688 uint32_t cr
= a8
->armv8_common
.debug_base
;
692 case 0 ... 15: /* breakpoints */
693 vr
+= CPUV8_DBG_BVR_BASE
;
694 cr
+= CPUV8_DBG_BCR_BASE
;
696 case 16 ... 31: /* watchpoints */
697 vr
+= CPUV8_DBG_WVR_BASE
;
698 cr
+= CPUV8_DBG_WCR_BASE
;
707 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
708 (unsigned) vr
, (unsigned) cr
);
710 retval
= aarch64_dap_write_memap_register_u32(dpm
->arm
->target
,
712 if (retval
!= ERROR_OK
)
714 retval
= aarch64_dap_write_memap_register_u32(dpm
->arm
->target
,
719 static int aarch64_bpwp_disable(struct arm_dpm
*dpm
, unsigned index_t
)
721 struct aarch64_common
*a
= dpm_to_a8(dpm
);
726 cr
= a
->armv8_common
.debug_base
+ CPUV8_DBG_BCR_BASE
;
729 cr
= a
->armv8_common
.debug_base
+ CPUV8_DBG_WCR_BASE
;
737 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr
);
739 /* clear control register */
740 return aarch64_dap_write_memap_register_u32(dpm
->arm
->target
, cr
, 0);
744 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint64_t debug
)
746 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
749 dpm
->arm
= &a8
->armv8_common
.arm
;
752 dpm
->prepare
= aarch64_dpm_prepare
;
753 dpm
->finish
= aarch64_dpm_finish
;
755 dpm
->instr_execute
= aarch64_instr_execute
;
756 dpm
->instr_write_data_dcc
= aarch64_instr_write_data_dcc
;
757 dpm
->instr_write_data_dcc_64
= aarch64_instr_write_data_dcc_64
;
758 dpm
->instr_write_data_r0
= aarch64_instr_write_data_r0
;
759 dpm
->instr_write_data_r0_64
= aarch64_instr_write_data_r0_64
;
760 dpm
->instr_cpsr_sync
= aarch64_instr_cpsr_sync
;
762 dpm
->instr_read_data_dcc
= aarch64_instr_read_data_dcc
;
763 dpm
->instr_read_data_dcc_64
= aarch64_instr_read_data_dcc_64
;
764 dpm
->instr_read_data_r0
= aarch64_instr_read_data_r0
;
765 dpm
->instr_read_data_r0_64
= aarch64_instr_read_data_r0_64
;
767 dpm
->arm_reg_current
= armv8_reg_current
;
769 dpm
->bpwp_enable
= aarch64_bpwp_enable
;
770 dpm
->bpwp_disable
= aarch64_bpwp_disable
;
772 retval
= armv8_dpm_setup(dpm
);
773 if (retval
== ERROR_OK
)
774 retval
= armv8_dpm_initialize(dpm
);
778 static struct target
*get_aarch64(struct target
*target
, int32_t coreid
)
780 struct target_list
*head
;
784 while (head
!= (struct target_list
*)NULL
) {
786 if ((curr
->coreid
== coreid
) && (curr
->state
== TARGET_HALTED
))
792 static int aarch64_halt(struct target
*target
);
794 static int aarch64_halt_smp(struct target
*target
)
796 int retval
= ERROR_OK
;
797 struct target_list
*head
= target
->head
;
799 while (head
!= (struct target_list
*)NULL
) {
800 struct target
*curr
= head
->target
;
801 struct armv8_common
*armv8
= target_to_armv8(curr
);
803 /* open the gate for channel 0 to let HALT requests pass to the CTM */
805 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
806 armv8
->cti_base
+ CTI_GATE
, CTI_CHNL(0));
807 if (retval
!= ERROR_OK
)
813 /* halt the target PE */
814 if (retval
== ERROR_OK
)
815 retval
= aarch64_halt(target
);
820 static int update_halt_gdb(struct target
*target
)
823 if (target
->gdb_service
&& target
->gdb_service
->core
[0] == -1) {
824 target
->gdb_service
->target
= target
;
825 target
->gdb_service
->core
[0] = target
->coreid
;
826 retval
+= aarch64_halt_smp(target
);
832 * Cortex-A8 Run control
835 static int aarch64_poll(struct target
*target
)
837 int retval
= ERROR_OK
;
839 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
840 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
841 enum target_state prev_target_state
= target
->state
;
842 /* toggle to another core is done by gdb as follow */
843 /* maint packet J core_id */
845 /* the next polling trigger an halt event sent to gdb */
846 if ((target
->state
== TARGET_HALTED
) && (target
->smp
) &&
847 (target
->gdb_service
) &&
848 (target
->gdb_service
->target
== NULL
)) {
849 target
->gdb_service
->target
=
850 get_aarch64(target
, target
->gdb_service
->core
[1]);
851 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
854 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
855 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
856 if (retval
!= ERROR_OK
)
858 aarch64
->cpudbg_dscr
= dscr
;
860 if (DSCR_RUN_MODE(dscr
) == 0x3) {
861 if (prev_target_state
!= TARGET_HALTED
) {
862 /* We have a halting debug event */
863 LOG_DEBUG("Target halted");
864 target
->state
= TARGET_HALTED
;
865 if ((prev_target_state
== TARGET_RUNNING
)
866 || (prev_target_state
== TARGET_UNKNOWN
)
867 || (prev_target_state
== TARGET_RESET
)) {
868 retval
= aarch64_debug_entry(target
);
869 if (retval
!= ERROR_OK
)
872 retval
= update_halt_gdb(target
);
873 if (retval
!= ERROR_OK
)
876 target_call_event_callbacks(target
,
877 TARGET_EVENT_HALTED
);
879 if (prev_target_state
== TARGET_DEBUG_RUNNING
) {
882 retval
= aarch64_debug_entry(target
);
883 if (retval
!= ERROR_OK
)
886 retval
= update_halt_gdb(target
);
887 if (retval
!= ERROR_OK
)
891 target_call_event_callbacks(target
,
892 TARGET_EVENT_DEBUG_HALTED
);
896 target
->state
= TARGET_RUNNING
;
901 static int aarch64_halt(struct target
*target
)
903 int retval
= ERROR_OK
;
905 struct armv8_common
*armv8
= target_to_armv8(target
);
908 * add HDE in halting debug mode
910 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
911 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
912 if (retval
== ERROR_OK
)
913 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
914 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
| DSCR_HDE
);
915 if (retval
!= ERROR_OK
)
918 /* trigger an event on channel 0, this outputs a halt request to the PE */
919 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
920 armv8
->cti_base
+ CTI_APPPULSE
, CTI_CHNL(0));
921 if (retval
!= ERROR_OK
)
924 long long then
= timeval_ms();
926 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
927 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
928 if (retval
!= ERROR_OK
)
930 if ((dscr
& DSCRV8_HALT_MASK
) != 0)
932 if (timeval_ms() > then
+ 1000) {
933 LOG_ERROR("Timeout waiting for halt");
938 target
->debug_reason
= DBG_REASON_DBGRQ
;
943 static int aarch64_internal_restore(struct target
*target
, int current
,
944 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
946 struct armv8_common
*armv8
= target_to_armv8(target
);
947 struct arm
*arm
= &armv8
->arm
;
951 if (!debug_execution
)
952 target_free_all_working_areas(target
);
954 /* current = 1: continue on current pc, otherwise continue at <address> */
955 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
957 resume_pc
= *address
;
959 *address
= resume_pc
;
961 /* Make sure that the Armv7 gdb thumb fixups does not
962 * kill the return address
964 switch (arm
->core_state
) {
966 resume_pc
&= 0xFFFFFFFC;
968 case ARM_STATE_AARCH64
:
969 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
971 case ARM_STATE_THUMB
:
972 case ARM_STATE_THUMB_EE
:
973 /* When the return address is loaded into PC
974 * bit 0 must be 1 to stay in Thumb state
978 case ARM_STATE_JAZELLE
:
979 LOG_ERROR("How do I resume into Jazelle state??");
982 LOG_DEBUG("resume pc = 0x%16" PRIx64
, resume_pc
);
983 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
986 dpmv8_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
988 /* called it now before restoring context because it uses cpu
989 * register r0 for restoring system control register */
990 retval
= aarch64_restore_system_control_reg(target
);
991 if (retval
!= ERROR_OK
)
993 retval
= aarch64_restore_context(target
, handle_breakpoints
);
994 if (retval
!= ERROR_OK
)
996 target
->debug_reason
= DBG_REASON_NOTHALTED
;
997 target
->state
= TARGET_RUNNING
;
999 /* registers are now invalid */
1000 register_cache_invalidate(arm
->core_cache
);
1003 /* the front-end may request us not to handle breakpoints */
1004 if (handle_breakpoints
) {
1005 /* Single step past breakpoint at current address */
1006 breakpoint
= breakpoint_find(target
, resume_pc
);
1008 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint
->address
);
1009 cortex_m3_unset_breakpoint(target
, breakpoint
);
1010 cortex_m3_single_step_core(target
);
1011 cortex_m3_set_breakpoint(target
, breakpoint
);
1019 static int aarch64_internal_restart(struct target
*target
, bool slave_pe
)
1021 struct armv8_common
*armv8
= target_to_armv8(target
);
1022 struct arm
*arm
= &armv8
->arm
;
1026 * * Restart core and wait for it to be started. Clear ITRen and sticky
1027 * * exception flags: see ARMv7 ARM, C5.9.
1029 * REVISIT: for single stepping, we probably want to
1030 * disable IRQs by default, with optional override...
1033 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1034 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1035 if (retval
!= ERROR_OK
)
1038 if ((dscr
& DSCR_ITE
) == 0)
1039 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
1041 /* make sure to acknowledge the halt event before resuming */
1042 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1043 armv8
->cti_base
+ CTI_INACK
, CTI_TRIG(HALT
));
1046 * open the CTI gate for channel 1 so that the restart events
1047 * get passed along to all PEs
1049 if (retval
== ERROR_OK
)
1050 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1051 armv8
->cti_base
+ CTI_GATE
, CTI_CHNL(1));
1052 if (retval
!= ERROR_OK
)
1056 /* trigger an event on channel 1, generates a restart request to the PE */
1057 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1058 armv8
->cti_base
+ CTI_APPPULSE
, CTI_CHNL(1));
1059 if (retval
!= ERROR_OK
)
1062 long long then
= timeval_ms();
1064 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1065 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1066 if (retval
!= ERROR_OK
)
1068 if ((dscr
& DSCR_HDE
) != 0)
1070 if (timeval_ms() > then
+ 1000) {
1071 LOG_ERROR("Timeout waiting for resume");
1077 target
->debug_reason
= DBG_REASON_NOTHALTED
;
1078 target
->state
= TARGET_RUNNING
;
1080 /* registers are now invalid */
1081 register_cache_invalidate(arm
->core_cache
);
1086 static int aarch64_restore_smp(struct target
*target
, int handle_breakpoints
)
1089 struct target_list
*head
;
1090 struct target
*curr
;
1092 head
= target
->head
;
1093 while (head
!= (struct target_list
*)NULL
) {
1094 curr
= head
->target
;
1095 if ((curr
!= target
) && (curr
->state
!= TARGET_RUNNING
)) {
1096 /* resume current address , not in step mode */
1097 retval
+= aarch64_internal_restore(curr
, 1, &address
,
1098 handle_breakpoints
, 0);
1099 retval
+= aarch64_internal_restart(curr
, true);
1107 static int aarch64_resume(struct target
*target
, int current
,
1108 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
1111 uint64_t addr
= address
;
1113 /* dummy resume for smp toggle in order to reduce gdb impact */
1114 if ((target
->smp
) && (target
->gdb_service
->core
[1] != -1)) {
1115 /* simulate a start and halt of target */
1116 target
->gdb_service
->target
= NULL
;
1117 target
->gdb_service
->core
[0] = target
->gdb_service
->core
[1];
1118 /* fake resume at next poll we play the target core[1], see poll*/
1119 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1122 aarch64_internal_restore(target
, current
, &addr
, handle_breakpoints
,
1125 target
->gdb_service
->core
[0] = -1;
1126 retval
= aarch64_restore_smp(target
, handle_breakpoints
);
1127 if (retval
!= ERROR_OK
)
1130 aarch64_internal_restart(target
, false);
1132 if (!debug_execution
) {
1133 target
->state
= TARGET_RUNNING
;
1134 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1135 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
1137 target
->state
= TARGET_DEBUG_RUNNING
;
1138 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
1139 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
1145 static int aarch64_debug_entry(struct target
*target
)
1147 int retval
= ERROR_OK
;
1148 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1149 struct armv8_common
*armv8
= target_to_armv8(target
);
1151 LOG_DEBUG("dscr = 0x%08" PRIx32
, aarch64
->cpudbg_dscr
);
1153 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1154 * imprecise data aborts get discarded by issuing a Data
1155 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1158 /* make sure to clear all sticky errors */
1159 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1160 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
1161 if (retval
!= ERROR_OK
)
1164 /* Examine debug reason */
1165 armv8_dpm_report_dscr(&armv8
->dpm
, aarch64
->cpudbg_dscr
);
1167 /* save address of instruction that triggered the watchpoint? */
1168 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
1172 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1173 armv8
->debug_base
+ CPUV8_DBG_WFAR1
,
1175 if (retval
!= ERROR_OK
)
1178 wfar
= (wfar
<< 32);
1179 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1180 armv8
->debug_base
+ CPUV8_DBG_WFAR0
,
1182 if (retval
!= ERROR_OK
)
1185 armv8_dpm_report_wfar(&armv8
->dpm
, wfar
);
1188 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
1190 if (armv8
->post_debug_entry
) {
1191 retval
= armv8
->post_debug_entry(target
);
1192 if (retval
!= ERROR_OK
)
1199 static int aarch64_post_debug_entry(struct target
*target
)
1201 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1202 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1205 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1206 armv8
->debug_base
+ CPUV8_DBG_DRCR
, 1<<2);
1207 switch (armv8
->arm
.core_mode
) {
1211 retval
= armv8
->arm
.mrs(target
, 3, /*op 0*/
1212 0, 0, /* op1, op2 */
1213 1, 0, /* CRn, CRm */
1214 &aarch64
->system_control_reg
);
1215 if (retval
!= ERROR_OK
)
1220 retval
= armv8
->arm
.mrs(target
, 3, /*op 0*/
1221 4, 0, /* op1, op2 */
1222 1, 0, /* CRn, CRm */
1223 &aarch64
->system_control_reg
);
1224 if (retval
!= ERROR_OK
)
1229 retval
= armv8
->arm
.mrs(target
, 3, /*op 0*/
1230 6, 0, /* op1, op2 */
1231 1, 0, /* CRn, CRm */
1232 &aarch64
->system_control_reg
);
1233 if (retval
!= ERROR_OK
)
1237 LOG_DEBUG("unknow cpu state 0x%x" PRIx32
, armv8
->arm
.core_state
);
1239 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
1240 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
1242 if (armv8
->armv8_mmu
.armv8_cache
.ctype
== -1)
1243 armv8_identify_cache(target
);
1245 armv8
->armv8_mmu
.mmu_enabled
=
1246 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
1247 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
1248 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
1249 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
1250 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
1251 aarch64
->curr_mode
= armv8
->arm
.core_mode
;
1255 static int aarch64_set_dscr_bits(struct target
*target
, unsigned long bit_mask
, unsigned long value
)
1257 struct armv8_common
*armv8
= target_to_armv8(target
);
1261 int retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1262 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1263 if (ERROR_OK
!= retval
)
1266 /* clear bitfield */
1269 dscr
|= value
& bit_mask
;
1271 /* write new DSCR */
1272 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1273 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1277 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
1278 int handle_breakpoints
)
1280 struct armv8_common
*armv8
= target_to_armv8(target
);
1284 if (target
->state
!= TARGET_HALTED
) {
1285 LOG_WARNING("target not halted");
1286 return ERROR_TARGET_NOT_HALTED
;
1289 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1290 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
1291 if (retval
!= ERROR_OK
)
1294 /* make sure EDECR.SS is not set when restoring the register */
1297 /* set EDECR.SS to enter hardware step mode */
1298 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1299 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (edecr
|0x4));
1300 if (retval
!= ERROR_OK
)
1303 /* disable interrupts while stepping */
1304 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0x3 << 22);
1305 if (retval
!= ERROR_OK
)
1308 /* resume the target */
1309 retval
= aarch64_resume(target
, current
, address
, 0, 0);
1310 if (retval
!= ERROR_OK
)
1313 long long then
= timeval_ms();
1314 while (target
->state
!= TARGET_HALTED
) {
1315 retval
= aarch64_poll(target
);
1316 if (retval
!= ERROR_OK
)
1318 if (timeval_ms() > then
+ 1000) {
1319 LOG_ERROR("timeout waiting for target halt");
1325 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1326 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
1327 if (retval
!= ERROR_OK
)
1330 /* restore interrupts */
1331 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0);
1332 if (retval
!= ERROR_OK
)
1338 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
1340 struct armv8_common
*armv8
= target_to_armv8(target
);
1344 if (armv8
->pre_restore_context
)
1345 armv8
->pre_restore_context(target
);
1347 return armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
1352 * Cortex-A8 Breakpoint and watchpoint functions
1355 /* Setup hardware Breakpoint Register Pair */
1356 static int aarch64_set_breakpoint(struct target
*target
,
1357 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1362 uint8_t byte_addr_select
= 0x0F;
1363 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1364 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1365 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1368 if (breakpoint
->set
) {
1369 LOG_WARNING("breakpoint already set");
1373 if (breakpoint
->type
== BKPT_HARD
) {
1375 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
1377 if (brp_i
>= aarch64
->brp_num
) {
1378 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1379 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1381 breakpoint
->set
= brp_i
+ 1;
1382 if (breakpoint
->length
== 2)
1383 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1384 control
= ((matchmode
& 0x7) << 20)
1386 | (byte_addr_select
<< 5)
1388 brp_list
[brp_i
].used
= 1;
1389 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1390 brp_list
[brp_i
].control
= control
;
1391 bpt_value
= brp_list
[brp_i
].value
;
1393 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1394 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1395 (uint32_t)(bpt_value
& 0xFFFFFFFF));
1396 if (retval
!= ERROR_OK
)
1398 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1399 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1400 (uint32_t)(bpt_value
>> 32));
1401 if (retval
!= ERROR_OK
)
1404 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1405 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1406 brp_list
[brp_i
].control
);
1407 if (retval
!= ERROR_OK
)
1409 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1410 brp_list
[brp_i
].control
,
1411 brp_list
[brp_i
].value
);
1413 } else if (breakpoint
->type
== BKPT_SOFT
) {
1416 buf_set_u32(code
, 0, 32, ARMV8_HLT(0x11));
1417 retval
= target_read_memory(target
,
1418 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1419 breakpoint
->length
, 1,
1420 breakpoint
->orig_instr
);
1421 if (retval
!= ERROR_OK
)
1424 armv8_cache_d_inner_flush_virt(armv8
,
1425 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1426 breakpoint
->length
);
1428 retval
= target_write_memory(target
,
1429 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1430 breakpoint
->length
, 1, code
);
1431 if (retval
!= ERROR_OK
)
1434 armv8_cache_d_inner_flush_virt(armv8
,
1435 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1436 breakpoint
->length
);
1438 armv8_cache_i_inner_inval_virt(armv8
,
1439 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1440 breakpoint
->length
);
1442 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1445 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1446 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1447 /* Ensure that halting debug mode is enable */
1448 dscr
= dscr
| DSCR_HDE
;
1449 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1450 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1451 if (retval
!= ERROR_OK
) {
1452 LOG_DEBUG("Failed to set DSCR.HDE");
1459 static int aarch64_set_context_breakpoint(struct target
*target
,
1460 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1462 int retval
= ERROR_FAIL
;
1465 uint8_t byte_addr_select
= 0x0F;
1466 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1467 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1468 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1470 if (breakpoint
->set
) {
1471 LOG_WARNING("breakpoint already set");
1474 /*check available context BRPs*/
1475 while ((brp_list
[brp_i
].used
||
1476 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
1479 if (brp_i
>= aarch64
->brp_num
) {
1480 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1484 breakpoint
->set
= brp_i
+ 1;
1485 control
= ((matchmode
& 0x7) << 20)
1487 | (byte_addr_select
<< 5)
1489 brp_list
[brp_i
].used
= 1;
1490 brp_list
[brp_i
].value
= (breakpoint
->asid
);
1491 brp_list
[brp_i
].control
= control
;
1492 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1493 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1494 brp_list
[brp_i
].value
);
1495 if (retval
!= ERROR_OK
)
1497 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1498 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1499 brp_list
[brp_i
].control
);
1500 if (retval
!= ERROR_OK
)
1502 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1503 brp_list
[brp_i
].control
,
1504 brp_list
[brp_i
].value
);
1509 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1511 int retval
= ERROR_FAIL
;
1512 int brp_1
= 0; /* holds the contextID pair */
1513 int brp_2
= 0; /* holds the IVA pair */
1514 uint32_t control_CTX
, control_IVA
;
1515 uint8_t CTX_byte_addr_select
= 0x0F;
1516 uint8_t IVA_byte_addr_select
= 0x0F;
1517 uint8_t CTX_machmode
= 0x03;
1518 uint8_t IVA_machmode
= 0x01;
1519 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1520 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1521 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1523 if (breakpoint
->set
) {
1524 LOG_WARNING("breakpoint already set");
1527 /*check available context BRPs*/
1528 while ((brp_list
[brp_1
].used
||
1529 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
1532 printf("brp(CTX) found num: %d\n", brp_1
);
1533 if (brp_1
>= aarch64
->brp_num
) {
1534 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1538 while ((brp_list
[brp_2
].used
||
1539 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
1542 printf("brp(IVA) found num: %d\n", brp_2
);
1543 if (brp_2
>= aarch64
->brp_num
) {
1544 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1548 breakpoint
->set
= brp_1
+ 1;
1549 breakpoint
->linked_BRP
= brp_2
;
1550 control_CTX
= ((CTX_machmode
& 0x7) << 20)
1553 | (CTX_byte_addr_select
<< 5)
1555 brp_list
[brp_1
].used
= 1;
1556 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1557 brp_list
[brp_1
].control
= control_CTX
;
1558 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1559 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1560 brp_list
[brp_1
].value
);
1561 if (retval
!= ERROR_OK
)
1563 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1564 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1565 brp_list
[brp_1
].control
);
1566 if (retval
!= ERROR_OK
)
1569 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1572 | (IVA_byte_addr_select
<< 5)
1574 brp_list
[brp_2
].used
= 1;
1575 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1576 brp_list
[brp_2
].control
= control_IVA
;
1577 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1578 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1579 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1580 if (retval
!= ERROR_OK
)
1582 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1583 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].BRPn
,
1584 brp_list
[brp_2
].value
>> 32);
1585 if (retval
!= ERROR_OK
)
1587 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1588 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1589 brp_list
[brp_2
].control
);
1590 if (retval
!= ERROR_OK
)
1596 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1599 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1600 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1601 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1603 if (!breakpoint
->set
) {
1604 LOG_WARNING("breakpoint not set");
1608 if (breakpoint
->type
== BKPT_HARD
) {
1609 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1610 int brp_i
= breakpoint
->set
- 1;
1611 int brp_j
= breakpoint
->linked_BRP
;
1612 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1613 LOG_DEBUG("Invalid BRP number in breakpoint");
1616 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1617 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1618 brp_list
[brp_i
].used
= 0;
1619 brp_list
[brp_i
].value
= 0;
1620 brp_list
[brp_i
].control
= 0;
1621 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1622 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1623 brp_list
[brp_i
].control
);
1624 if (retval
!= ERROR_OK
)
1626 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1627 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1628 (uint32_t)brp_list
[brp_i
].value
);
1629 if (retval
!= ERROR_OK
)
1631 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1632 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1633 (uint32_t)brp_list
[brp_i
].value
);
1634 if (retval
!= ERROR_OK
)
1636 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1637 LOG_DEBUG("Invalid BRP number in breakpoint");
1640 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1641 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1642 brp_list
[brp_j
].used
= 0;
1643 brp_list
[brp_j
].value
= 0;
1644 brp_list
[brp_j
].control
= 0;
1645 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1646 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1647 brp_list
[brp_j
].control
);
1648 if (retval
!= ERROR_OK
)
1650 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1651 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1652 (uint32_t)brp_list
[brp_j
].value
);
1653 if (retval
!= ERROR_OK
)
1655 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1656 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_j
].BRPn
,
1657 (uint32_t)brp_list
[brp_j
].value
);
1658 if (retval
!= ERROR_OK
)
1661 breakpoint
->linked_BRP
= 0;
1662 breakpoint
->set
= 0;
1666 int brp_i
= breakpoint
->set
- 1;
1667 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1668 LOG_DEBUG("Invalid BRP number in breakpoint");
1671 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1672 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1673 brp_list
[brp_i
].used
= 0;
1674 brp_list
[brp_i
].value
= 0;
1675 brp_list
[brp_i
].control
= 0;
1676 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1677 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1678 brp_list
[brp_i
].control
);
1679 if (retval
!= ERROR_OK
)
1681 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1682 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1683 brp_list
[brp_i
].value
);
1684 if (retval
!= ERROR_OK
)
1687 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1688 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1689 (uint32_t)brp_list
[brp_i
].value
);
1690 if (retval
!= ERROR_OK
)
1692 breakpoint
->set
= 0;
1696 /* restore original instruction (kept in target endianness) */
1698 armv8_cache_d_inner_flush_virt(armv8
,
1699 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1700 breakpoint
->length
);
1702 if (breakpoint
->length
== 4) {
1703 retval
= target_write_memory(target
,
1704 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1705 4, 1, breakpoint
->orig_instr
);
1706 if (retval
!= ERROR_OK
)
1709 retval
= target_write_memory(target
,
1710 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1711 2, 1, breakpoint
->orig_instr
);
1712 if (retval
!= ERROR_OK
)
1716 armv8_cache_d_inner_flush_virt(armv8
,
1717 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1718 breakpoint
->length
);
1720 armv8_cache_i_inner_inval_virt(armv8
,
1721 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1722 breakpoint
->length
);
1724 breakpoint
->set
= 0;
1729 static int aarch64_add_breakpoint(struct target
*target
,
1730 struct breakpoint
*breakpoint
)
1732 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1734 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1735 LOG_INFO("no hardware breakpoint available");
1736 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1739 if (breakpoint
->type
== BKPT_HARD
)
1740 aarch64
->brp_num_available
--;
1742 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1745 static int aarch64_add_context_breakpoint(struct target
*target
,
1746 struct breakpoint
*breakpoint
)
1748 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1750 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1751 LOG_INFO("no hardware breakpoint available");
1752 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1755 if (breakpoint
->type
== BKPT_HARD
)
1756 aarch64
->brp_num_available
--;
1758 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1761 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1762 struct breakpoint
*breakpoint
)
1764 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1766 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1767 LOG_INFO("no hardware breakpoint available");
1768 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1771 if (breakpoint
->type
== BKPT_HARD
)
1772 aarch64
->brp_num_available
--;
1774 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1778 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1780 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1783 /* It is perfectly possible to remove breakpoints while the target is running */
1784 if (target
->state
!= TARGET_HALTED
) {
1785 LOG_WARNING("target not halted");
1786 return ERROR_TARGET_NOT_HALTED
;
1790 if (breakpoint
->set
) {
1791 aarch64_unset_breakpoint(target
, breakpoint
);
1792 if (breakpoint
->type
== BKPT_HARD
)
1793 aarch64
->brp_num_available
++;
1800 * Cortex-A8 Reset functions
1803 static int aarch64_assert_reset(struct target
*target
)
1805 struct armv8_common
*armv8
= target_to_armv8(target
);
1809 /* FIXME when halt is requested, make it work somehow... */
1811 /* Issue some kind of warm reset. */
1812 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1813 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1814 else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1815 /* REVISIT handle "pulls" cases, if there's
1816 * hardware that needs them to work.
1818 jtag_add_reset(0, 1);
1820 LOG_ERROR("%s: how to reset?", target_name(target
));
1824 /* registers are now invalid */
1825 register_cache_invalidate(armv8
->arm
.core_cache
);
1827 target
->state
= TARGET_RESET
;
1832 static int aarch64_deassert_reset(struct target
*target
)
1838 /* be certain SRST is off */
1839 jtag_add_reset(0, 0);
1841 retval
= aarch64_poll(target
);
1842 if (retval
!= ERROR_OK
)
1845 if (target
->reset_halt
) {
1846 if (target
->state
!= TARGET_HALTED
) {
1847 LOG_WARNING("%s: ran after reset and before halt ...",
1848 target_name(target
));
1849 retval
= target_halt(target
);
1850 if (retval
!= ERROR_OK
)
1858 static int aarch64_write_apb_ap_memory(struct target
*target
,
1859 uint64_t address
, uint32_t size
,
1860 uint32_t count
, const uint8_t *buffer
)
1862 /* write memory through APB-AP */
1863 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1864 struct armv8_common
*armv8
= target_to_armv8(target
);
1865 struct arm
*arm
= &armv8
->arm
;
1866 int total_bytes
= count
* size
;
1868 int start_byte
= address
& 0x3;
1869 int end_byte
= (address
+ total_bytes
) & 0x3;
1872 uint8_t *tmp_buff
= NULL
;
1874 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64
" size %" PRIu32
" count%" PRIu32
,
1875 address
, size
, count
);
1876 if (target
->state
!= TARGET_HALTED
) {
1877 LOG_WARNING("target not halted");
1878 return ERROR_TARGET_NOT_HALTED
;
1881 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1883 /* Mark register R0 as dirty, as it will be used
1884 * for transferring the data.
1885 * It will be restored automatically when exiting
1888 reg
= armv8_reg_current(arm
, 1);
1891 reg
= armv8_reg_current(arm
, 0);
1894 /* clear any abort */
1895 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1896 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
1897 if (retval
!= ERROR_OK
)
1901 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1903 /* The algorithm only copies 32 bit words, so the buffer
1904 * should be expanded to include the words at either end.
1905 * The first and last words will be read first to avoid
1906 * corruption if needed.
1908 tmp_buff
= malloc(total_u32
* 4);
1910 if ((start_byte
!= 0) && (total_u32
> 1)) {
1911 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1912 * the other bytes in the word.
1914 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3), 4, 1, tmp_buff
);
1915 if (retval
!= ERROR_OK
)
1916 goto error_free_buff_w
;
1919 /* If end of write is not aligned, or the write is less than 4 bytes */
1920 if ((end_byte
!= 0) ||
1921 ((total_u32
== 1) && (total_bytes
!= 4))) {
1923 /* Read the last word to avoid corruption during 32 bit write */
1924 int mem_offset
= (total_u32
-1) * 4;
1925 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3) + mem_offset
, 4, 1, &tmp_buff
[mem_offset
]);
1926 if (retval
!= ERROR_OK
)
1927 goto error_free_buff_w
;
1930 /* Copy the write buffer over the top of the temporary buffer */
1931 memcpy(&tmp_buff
[start_byte
], buffer
, total_bytes
);
1933 /* We now have a 32 bit aligned buffer that can be written */
1936 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1937 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1938 if (retval
!= ERROR_OK
)
1939 goto error_free_buff_w
;
1941 /* Set Normal access mode */
1942 dscr
= (dscr
& ~DSCR_MA
);
1943 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1944 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1946 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1947 /* Write X0 with value 'address' using write procedure */
1948 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1949 retval
+= aarch64_write_dcc_64(armv8
, address
& ~0x3ULL
);
1950 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1951 retval
+= aarch64_exec_opcode(target
,
1952 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), &dscr
);
1954 /* Write R0 with value 'address' using write procedure */
1955 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1956 retval
+= aarch64_write_dcc(armv8
, address
& ~0x3ULL
);
1957 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1958 retval
+= aarch64_exec_opcode(target
,
1959 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr
);
1962 /* Step 1.d - Change DCC to memory mode */
1963 dscr
= dscr
| DSCR_MA
;
1964 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1965 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1966 if (retval
!= ERROR_OK
)
1967 goto error_unset_dtr_w
;
1970 /* Step 2.a - Do the write */
1971 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
1972 tmp_buff
, 4, total_u32
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
1973 if (retval
!= ERROR_OK
)
1974 goto error_unset_dtr_w
;
1976 /* Step 3.a - Switch DTR mode back to Normal mode */
1977 dscr
= (dscr
& ~DSCR_MA
);
1978 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1979 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1980 if (retval
!= ERROR_OK
)
1981 goto error_unset_dtr_w
;
1983 /* Check for sticky abort flags in the DSCR */
1984 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1985 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1986 if (retval
!= ERROR_OK
)
1987 goto error_free_buff_w
;
1988 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1989 /* Abort occurred - clear it and exit */
1990 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1991 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1992 armv8
->debug_base
+ CPUV8_DBG_DRCR
, 1<<2);
1993 goto error_free_buff_w
;
2001 /* Unset DTR mode */
2002 mem_ap_read_atomic_u32(armv8
->debug_ap
,
2003 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2004 dscr
= (dscr
& ~DSCR_MA
);
2005 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2006 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2013 static int aarch64_read_apb_ap_memory(struct target
*target
,
2014 target_addr_t address
, uint32_t size
,
2015 uint32_t count
, uint8_t *buffer
)
2017 /* read memory through APB-AP */
2018 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2019 struct armv8_common
*armv8
= target_to_armv8(target
);
2020 struct arm
*arm
= &armv8
->arm
;
2021 int total_bytes
= count
* size
;
2023 int start_byte
= address
& 0x3;
2024 int end_byte
= (address
+ total_bytes
) & 0x3;
2027 uint8_t *tmp_buff
= NULL
;
2031 LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR
" size %" PRIu32
" count%" PRIu32
,
2032 address
, size
, count
);
2033 if (target
->state
!= TARGET_HALTED
) {
2034 LOG_WARNING("target not halted");
2035 return ERROR_TARGET_NOT_HALTED
;
2038 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
2039 /* Mark register X0, X1 as dirty, as it will be used
2040 * for transferring the data.
2041 * It will be restored automatically when exiting
2044 reg
= armv8_reg_current(arm
, 1);
2047 reg
= armv8_reg_current(arm
, 0);
2050 /* clear any abort */
2051 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2052 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
2053 if (retval
!= ERROR_OK
)
2054 goto error_free_buff_r
;
2057 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2058 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2060 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2062 /* Set Normal access mode */
2063 dscr
= (dscr
& ~DSCR_MA
);
2064 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2065 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2067 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2068 /* Write X0 with value 'address' using write procedure */
2069 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2070 retval
+= aarch64_write_dcc_64(armv8
, address
& ~0x3ULL
);
2071 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2072 retval
+= aarch64_exec_opcode(target
, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), &dscr
);
2073 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2074 retval
+= aarch64_exec_opcode(target
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0), &dscr
);
2075 /* Step 1.e - Change DCC to memory mode */
2076 dscr
= dscr
| DSCR_MA
;
2077 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2078 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2079 /* Step 1.f - read DBGDTRTX and discard the value */
2080 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2081 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2083 /* Write R0 with value 'address' using write procedure */
2084 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2085 retval
+= aarch64_write_dcc(armv8
, address
& ~0x3ULL
);
2086 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2087 retval
+= aarch64_exec_opcode(target
,
2088 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr
);
2089 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2090 retval
+= aarch64_exec_opcode(target
,
2091 T32_FMTITR(ARMV4_5_MCR(14, 0, 0, 0, 5, 0)), &dscr
);
2092 /* Step 1.e - Change DCC to memory mode */
2093 dscr
= dscr
| DSCR_MA
;
2094 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2095 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2096 /* Step 1.f - read DBGDTRTX and discard the value */
2097 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2098 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2101 if (retval
!= ERROR_OK
)
2102 goto error_unset_dtr_r
;
2104 /* Optimize the read as much as we can, either way we read in a single pass */
2105 if ((start_byte
) || (end_byte
)) {
2106 /* The algorithm only copies 32 bit words, so the buffer
2107 * should be expanded to include the words at either end.
2108 * The first and last words will be read into a temp buffer
2109 * to avoid corruption
2111 tmp_buff
= malloc(total_u32
* 4);
2113 goto error_unset_dtr_r
;
2115 /* use the tmp buffer to read the entire data */
2116 u8buf_ptr
= tmp_buff
;
2118 /* address and read length are aligned so read directly into the passed buffer */
2121 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
2122 * Abort flags are sticky, so can be read at end of transactions
2124 * This data is read in aligned to 32 bit boundary.
2127 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
2128 * increments X0 by 4. */
2129 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, u8buf_ptr
, 4, total_u32
-1,
2130 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
2131 if (retval
!= ERROR_OK
)
2132 goto error_unset_dtr_r
;
2134 /* Step 3.a - set DTR access mode back to Normal mode */
2135 dscr
= (dscr
& ~DSCR_MA
);
2136 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2137 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2138 if (retval
!= ERROR_OK
)
2139 goto error_free_buff_r
;
2141 /* Step 3.b - read DBGDTRTX for the final value */
2142 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2143 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2144 memcpy(u8buf_ptr
+ (total_u32
-1) * 4, &value
, 4);
2146 /* Check for sticky abort flags in the DSCR */
2147 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2148 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2149 if (retval
!= ERROR_OK
)
2150 goto error_free_buff_r
;
2151 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
2152 /* Abort occurred - clear it and exit */
2153 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
2154 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2155 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
2156 goto error_free_buff_r
;
2159 /* check if we need to copy aligned data by applying any shift necessary */
2161 memcpy(buffer
, tmp_buff
+ start_byte
, total_bytes
);
2169 /* Unset DTR mode */
2170 mem_ap_read_atomic_u32(armv8
->debug_ap
,
2171 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2172 dscr
= (dscr
& ~DSCR_MA
);
2173 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2174 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2181 static int aarch64_read_phys_memory(struct target
*target
,
2182 target_addr_t address
, uint32_t size
,
2183 uint32_t count
, uint8_t *buffer
)
2185 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2186 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
,
2187 address
, size
, count
);
2189 if (count
&& buffer
) {
2190 /* read memory through APB-AP */
2191 retval
= aarch64_mmu_modify(target
, 0);
2192 if (retval
!= ERROR_OK
)
2194 retval
= aarch64_read_apb_ap_memory(target
, address
, size
, count
, buffer
);
2199 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
2200 uint32_t size
, uint32_t count
, uint8_t *buffer
)
2202 int mmu_enabled
= 0;
2205 /* aarch64 handles unaligned memory access */
2206 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
, address
,
2209 /* determine if MMU was enabled on target stop */
2210 retval
= aarch64_mmu(target
, &mmu_enabled
);
2211 if (retval
!= ERROR_OK
)
2215 retval
= aarch64_check_address(target
, address
);
2216 if (retval
!= ERROR_OK
)
2218 /* enable MMU as we could have disabled it for phys access */
2219 retval
= aarch64_mmu_modify(target
, 1);
2220 if (retval
!= ERROR_OK
)
2223 return aarch64_read_apb_ap_memory(target
, address
, size
, count
, buffer
);
2226 static int aarch64_write_phys_memory(struct target
*target
,
2227 target_addr_t address
, uint32_t size
,
2228 uint32_t count
, const uint8_t *buffer
)
2230 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2232 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
, address
,
2235 if (count
&& buffer
) {
2236 /* write memory through APB-AP */
2237 retval
= aarch64_mmu_modify(target
, 0);
2238 if (retval
!= ERROR_OK
)
2240 return aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
2246 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
2247 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
2249 int mmu_enabled
= 0;
2252 /* aarch64 handles unaligned memory access */
2253 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR
"; size %" PRId32
2254 "; count %" PRId32
, address
, size
, count
);
2256 /* determine if MMU was enabled on target stop */
2257 retval
= aarch64_mmu(target
, &mmu_enabled
);
2258 if (retval
!= ERROR_OK
)
2262 retval
= aarch64_check_address(target
, address
);
2263 if (retval
!= ERROR_OK
)
2265 /* enable MMU as we could have disabled it for phys access */
2266 retval
= aarch64_mmu_modify(target
, 1);
2267 if (retval
!= ERROR_OK
)
2270 return aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
2273 static int aarch64_handle_target_request(void *priv
)
2275 struct target
*target
= priv
;
2276 struct armv8_common
*armv8
= target_to_armv8(target
);
2279 if (!target_was_examined(target
))
2281 if (!target
->dbg_msg_enabled
)
2284 if (target
->state
== TARGET_RUNNING
) {
2287 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2288 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2290 /* check if we have data */
2291 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
2292 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2293 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
2294 if (retval
== ERROR_OK
) {
2295 target_request(target
, request
);
2296 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2297 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2305 static int aarch64_examine_first(struct target
*target
)
2307 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2308 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2309 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2311 int retval
= ERROR_OK
;
2312 uint64_t debug
, ttypr
;
2314 uint32_t tmp0
, tmp1
;
2315 debug
= ttypr
= cpuid
= 0;
2317 /* We do one extra read to ensure DAP is configured,
2318 * we call ahbap_debugport_init(swjdp) instead
2320 retval
= dap_dp_init(swjdp
);
2321 if (retval
!= ERROR_OK
)
2324 /* Search for the APB-AB - it is needed for access to debug registers */
2325 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
2326 if (retval
!= ERROR_OK
) {
2327 LOG_ERROR("Could not find APB-AP for debug access");
2331 retval
= mem_ap_init(armv8
->debug_ap
);
2332 if (retval
!= ERROR_OK
) {
2333 LOG_ERROR("Could not initialize the APB-AP");
2337 armv8
->debug_ap
->memaccess_tck
= 80;
2339 if (!target
->dbgbase_set
) {
2341 /* Get ROM Table base */
2343 int32_t coreidx
= target
->coreid
;
2344 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
2345 if (retval
!= ERROR_OK
)
2347 /* Lookup 0x15 -- Processor DAP */
2348 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
2349 &armv8
->debug_base
, &coreidx
);
2350 if (retval
!= ERROR_OK
)
2352 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
2353 " apid: %08" PRIx32
, coreidx
, armv8
->debug_base
, apid
);
2355 armv8
->debug_base
= target
->dbgbase
;
2357 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2358 armv8
->debug_base
+ CPUV8_DBG_LOCKACCESS
, 0xC5ACCE55);
2359 if (retval
!= ERROR_OK
) {
2360 LOG_DEBUG("LOCK debug access fail");
2364 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2365 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
2366 if (retval
!= ERROR_OK
) {
2367 LOG_DEBUG("Examine %s failed", "oslock");
2371 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2372 armv8
->debug_base
+ CPUV8_DBG_MAINID0
, &cpuid
);
2373 if (retval
!= ERROR_OK
) {
2374 LOG_DEBUG("Examine %s failed", "CPUID");
2378 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2379 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
, &tmp0
);
2380 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2381 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
+ 4, &tmp1
);
2382 if (retval
!= ERROR_OK
) {
2383 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2387 ttypr
= (ttypr
<< 32) | tmp0
;
2389 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2390 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
, &tmp0
);
2391 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2392 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
+ 4, &tmp1
);
2393 if (retval
!= ERROR_OK
) {
2394 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2398 debug
= (debug
<< 32) | tmp0
;
2400 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
2401 LOG_DEBUG("ttypr = 0x%08" PRIx64
, ttypr
);
2402 LOG_DEBUG("debug = 0x%08" PRIx64
, debug
);
2404 if (target
->ctibase
== 0) {
2405 /* assume a v8 rom table layout */
2406 armv8
->cti_base
= target
->ctibase
= armv8
->debug_base
+ 0x10000;
2407 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32
, target
->ctibase
);
2409 armv8
->cti_base
= target
->ctibase
;
2411 armv8
->arm
.core_type
= ARM_MODE_MON
;
2412 retval
= aarch64_dpm_setup(aarch64
, debug
);
2413 if (retval
!= ERROR_OK
)
2416 /* Setup Breakpoint Register Pairs */
2417 aarch64
->brp_num
= (uint32_t)((debug
>> 12) & 0x0F) + 1;
2418 aarch64
->brp_num_context
= (uint32_t)((debug
>> 28) & 0x0F) + 1;
2419 aarch64
->brp_num_available
= aarch64
->brp_num
;
2420 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
2421 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
2422 aarch64
->brp_list
[i
].used
= 0;
2423 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
2424 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
2426 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
2427 aarch64
->brp_list
[i
].value
= 0;
2428 aarch64
->brp_list
[i
].control
= 0;
2429 aarch64
->brp_list
[i
].BRPn
= i
;
2432 LOG_DEBUG("Configured %i hw breakpoints", aarch64
->brp_num
);
2434 target_set_examined(target
);
2438 static int aarch64_examine(struct target
*target
)
2440 int retval
= ERROR_OK
;
2442 /* don't re-probe hardware after each reset */
2443 if (!target_was_examined(target
))
2444 retval
= aarch64_examine_first(target
);
2446 /* Configure core debug access */
2447 if (retval
== ERROR_OK
)
2448 retval
= aarch64_init_debug_access(target
);
 *	AArch64 target creation and initialization
2457 static int aarch64_init_target(struct command_context
*cmd_ctx
,
2458 struct target
*target
)
2460 /* examine_first() does a bunch of this */
2464 static int aarch64_init_arch_info(struct target
*target
,
2465 struct aarch64_common
*aarch64
, struct jtag_tap
*tap
)
2467 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2468 struct adiv5_dap
*dap
= armv8
->arm
.dap
;
2470 armv8
->arm
.dap
= dap
;
2472 /* Setup struct aarch64_common */
2473 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
2474 /* tap has no dap initialized */
2476 tap
->dap
= dap_init();
2478 /* Leave (only) generic DAP stuff for debugport_init() */
2479 tap
->dap
->tap
= tap
;
2482 armv8
->arm
.dap
= tap
->dap
;
2484 aarch64
->fast_reg_read
= 0;
2486 /* register arch-specific functions */
2487 armv8
->examine_debug_reason
= NULL
;
2489 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
2491 armv8
->pre_restore_context
= NULL
;
2493 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
2495 /* REVISIT v7a setup should be in a v7a-specific routine */
2496 armv8_init_arch_info(target
, armv8
);
2497 target_register_timer_callback(aarch64_handle_target_request
, 1, 1, target
);
2502 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
2504 struct aarch64_common
*aarch64
= calloc(1, sizeof(struct aarch64_common
));
2506 return aarch64_init_arch_info(target
, aarch64
, target
->tap
);
2509 static int aarch64_mmu(struct target
*target
, int *enabled
)
2511 if (target
->state
!= TARGET_HALTED
) {
2512 LOG_ERROR("%s: target not halted", __func__
);
2513 return ERROR_TARGET_INVALID
;
2516 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
2520 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
2521 target_addr_t
*phys
)
2523 return armv8_mmu_translate_va(target
, virt
, phys
);
2526 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
2528 struct target
*target
= get_current_target(CMD_CTX
);
2529 struct armv8_common
*armv8
= target_to_armv8(target
);
2531 return armv8_handle_cache_info_command(CMD_CTX
,
2532 &armv8
->armv8_mmu
.armv8_cache
);
2536 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
2538 struct target
*target
= get_current_target(CMD_CTX
);
2539 if (!target_was_examined(target
)) {
2540 LOG_ERROR("target not examined yet");
2544 return aarch64_init_debug_access(target
);
2546 COMMAND_HANDLER(aarch64_handle_smp_off_command
)
2548 struct target
*target
= get_current_target(CMD_CTX
);
2549 /* check target is an smp target */
2550 struct target_list
*head
;
2551 struct target
*curr
;
2552 head
= target
->head
;
2554 if (head
!= (struct target_list
*)NULL
) {
2555 while (head
!= (struct target_list
*)NULL
) {
2556 curr
= head
->target
;
2560 /* fixes the target display to the debugger */
2561 target
->gdb_service
->target
= target
;
2566 COMMAND_HANDLER(aarch64_handle_smp_on_command
)
2568 struct target
*target
= get_current_target(CMD_CTX
);
2569 struct target_list
*head
;
2570 struct target
*curr
;
2571 head
= target
->head
;
2572 if (head
!= (struct target_list
*)NULL
) {
2574 while (head
!= (struct target_list
*)NULL
) {
2575 curr
= head
->target
;
2583 COMMAND_HANDLER(aarch64_handle_smp_gdb_command
)
2585 struct target
*target
= get_current_target(CMD_CTX
);
2586 int retval
= ERROR_OK
;
2587 struct target_list
*head
;
2588 head
= target
->head
;
2589 if (head
!= (struct target_list
*)NULL
) {
2590 if (CMD_ARGC
== 1) {
2592 COMMAND_PARSE_NUMBER(int, CMD_ARGV
[0], coreid
);
2593 if (ERROR_OK
!= retval
)
2595 target
->gdb_service
->core
[1] = coreid
;
2598 command_print(CMD_CTX
, "gdb coreid %" PRId32
" -> %" PRId32
, target
->gdb_service
->core
[0]
2599 , target
->gdb_service
->core
[1]);
2604 static const struct command_registration aarch64_exec_command_handlers
[] = {
2606 .name
= "cache_info",
2607 .handler
= aarch64_handle_cache_info_command
,
2608 .mode
= COMMAND_EXEC
,
2609 .help
= "display information about target caches",
2614 .handler
= aarch64_handle_dbginit_command
,
2615 .mode
= COMMAND_EXEC
,
2616 .help
= "Initialize core debug",
2619 { .name
= "smp_off",
2620 .handler
= aarch64_handle_smp_off_command
,
2621 .mode
= COMMAND_EXEC
,
2622 .help
= "Stop smp handling",
2627 .handler
= aarch64_handle_smp_on_command
,
2628 .mode
= COMMAND_EXEC
,
2629 .help
= "Restart smp handling",
2634 .handler
= aarch64_handle_smp_gdb_command
,
2635 .mode
= COMMAND_EXEC
,
2636 .help
= "display/fix current core played to gdb",
2641 COMMAND_REGISTRATION_DONE
2643 static const struct command_registration aarch64_command_handlers
[] = {
2645 .chain
= arm_command_handlers
,
2648 .chain
= armv8_command_handlers
,
2652 .mode
= COMMAND_ANY
,
2653 .help
= "Cortex-A command group",
2655 .chain
= aarch64_exec_command_handlers
,
2657 COMMAND_REGISTRATION_DONE
2660 struct target_type aarch64_target
= {
2663 .poll
= aarch64_poll
,
2664 .arch_state
= armv8_arch_state
,
2666 .halt
= aarch64_halt
,
2667 .resume
= aarch64_resume
,
2668 .step
= aarch64_step
,
2670 .assert_reset
= aarch64_assert_reset
,
2671 .deassert_reset
= aarch64_deassert_reset
,
2673 /* REVISIT allow exporting VFP3 registers ... */
2674 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
2676 .read_memory
= aarch64_read_memory
,
2677 .write_memory
= aarch64_write_memory
,
2679 .checksum_memory
= arm_checksum_memory
,
2680 .blank_check_memory
= arm_blank_check_memory
,
2682 .run_algorithm
= armv4_5_run_algorithm
,
2684 .add_breakpoint
= aarch64_add_breakpoint
,
2685 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
2686 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
2687 .remove_breakpoint
= aarch64_remove_breakpoint
,
2688 .add_watchpoint
= NULL
,
2689 .remove_watchpoint
= NULL
,
2691 .commands
= aarch64_command_handlers
,
2692 .target_create
= aarch64_target_create
,
2693 .init_target
= aarch64_init_target
,
2694 .examine
= aarch64_examine
,
2696 .read_phys_memory
= aarch64_read_phys_memory
,
2697 .write_phys_memory
= aarch64_write_phys_memory
,
2699 .virt2phys
= aarch64_virt2phys
,
Linking to existing account procedure
If you already have an account and want to add another login method
you
MUST first sign in with your existing account and
then change URL to read
https://review.openocd.org/login/?link
to get to this page again but this time it'll work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)