1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "arm_opcodes.h"
30 #include <helper/time_support.h>
32 static int aarch64_poll(struct target
*target
);
33 static int aarch64_debug_entry(struct target
*target
);
34 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
35 static int aarch64_set_breakpoint(struct target
*target
,
36 struct breakpoint
*breakpoint
, uint8_t matchmode
);
37 static int aarch64_set_context_breakpoint(struct target
*target
,
38 struct breakpoint
*breakpoint
, uint8_t matchmode
);
39 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
40 struct breakpoint
*breakpoint
);
41 static int aarch64_unset_breakpoint(struct target
*target
,
42 struct breakpoint
*breakpoint
);
43 static int aarch64_mmu(struct target
*target
, int *enabled
);
44 static int aarch64_virt2phys(struct target
*target
,
45 target_addr_t virt
, target_addr_t
*phys
);
46 static int aarch64_read_apb_ab_memory(struct target
*target
,
47 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
48 static int aarch64_instr_write_data_r0(struct arm_dpm
*dpm
,
49 uint32_t opcode
, uint32_t data
);
/* Push the cached SCTLR value (aarch64->system_control_reg) back into the
 * core when it differs from the shadow copy of what the core currently
 * holds (system_control_reg_curr).  The write goes through the DPM via
 * aarch64_instr_write_data_r0(), i.e. an instruction executed with the
 * value staged in R0.  Returns an OpenOCD error code; ERROR_OK when no
 * update was needed.
 * NOTE(review): this chunk was extracted with interior lines dropped
 * (braces, the MSR opcode argument and the final return are missing);
 * surviving tokens are preserved byte-for-byte. */
51 static int aarch64_restore_system_control_reg(struct target
*target
)
53 int retval
= ERROR_OK
;
55 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
56 struct armv8_common
*armv8
= target_to_armv8(target
);
/* Only touch the core if the desired value and the shadow copy disagree. */
58 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
59 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
60 retval
= aarch64_instr_write_data_r0(armv8
->arm
.dpm
,
62 aarch64
->system_control_reg
);
68 /* check address before aarch64_apb read write access with mmu on
69 * remove apb predictible data abort */
70 static int aarch64_check_address(struct target
*target
, uint32_t address
)
75 /* modify system_control_reg in order to enable or disable mmu for :
76 * - virt2phys address conversion
77 * - read or write memory in phys or virt address */
/* Flips SCTLR bits in the core (through the shadow copy
 * system_control_reg_curr) so debugger memory accesses run under the
 * desired translation regime.  From the masks used below: bit 0 (0x1U)
 * is the MMU enable, bit 2 (0x4U) the data-cache enable.
 * NOTE(review): extraction dropped interior lines (braces, the
 * enable/disable branch skeleton, opcode arguments, the return);
 * surviving tokens are byte-for-byte. */
78 static int aarch64_mmu_modify(struct target
*target
, int enable
)
80 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
81 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
82 int retval
= ERROR_OK
;
85 /* if mmu enabled at target stop and mmu not enable */
86 if (!(aarch64
->system_control_reg
& 0x1U
)) {
87 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
/* Enable path: set the MMU bit in the shadow copy and write it out. */
90 if (!(aarch64
->system_control_reg_curr
& 0x1U
)) {
91 aarch64
->system_control_reg_curr
|= 0x1U
;
92 retval
= aarch64_instr_write_data_r0(armv8
->arm
.dpm
,
94 aarch64
->system_control_reg_curr
);
/* Presumably the disable path: the data cache is switched off and
 * flushed before the MMU bit is cleared — TODO confirm against the
 * complete source, branch structure was lost in extraction. */
97 if (aarch64
->system_control_reg_curr
& 0x4U
) {
98 /* data cache is active */
99 aarch64
->system_control_reg_curr
&= ~0x4U
;
100 /* flush data cache armv7 function to be called */
101 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
102 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
104 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
105 aarch64
->system_control_reg_curr
&= ~0x1U
;
106 retval
= aarch64_instr_write_data_r0(armv8
->arm
.dpm
,
108 aarch64
->system_control_reg_curr
);
115 * Basic debug access, very low level assumes state is saved
/* One-time unlock/initialisation of the external debug interface:
 * writes the ARM debug lock-access key (0xC5ACCE55) to the OS Lock
 * Access register (retrying once, since the debug port may still be
 * uninitialised), then reads PRSR to clear the sticky power-down
 * status, and finally polls the target to refresh OpenOCD's view of
 * its state.  NOTE(review): extraction dropped interior lines
 * (braces, retval/dummy declarations, early returns); surviving
 * tokens are byte-for-byte. */
117 static int aarch64_init_debug_access(struct target
*target
)
119 struct armv8_common
*armv8
= target_to_armv8(target
);
125 /* Unlocking the debug registers for modification
126 * The debugport might be uninitialised so try twice */
127 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
128 armv8
->debug_base
+ CPUDBG_LOCKACCESS
, 0xC5ACCE55);
129 if (retval
!= ERROR_OK
) {
/* Second attempt with the same key; only the failure of both is fatal. */
131 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
132 armv8
->debug_base
+ CPUDBG_LOCKACCESS
, 0xC5ACCE55);
133 if (retval
== ERROR_OK
)
134 LOG_USER("Locking debug access failed on first, but succeeded on second try.")
;
136 if (retval
!= ERROR_OK
)
138 /* Clear Sticky Power Down status Bit in PRSR to enable access to
139 the registers in the Core Power Domain */
140 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
141 armv8
->debug_base
+ CPUDBG_PRSR
, &dummy
);
142 if (retval
!= ERROR_OK
)
145 /* Enabling of instruction execution in debug mode is done in debug_entry code */
147 /* Resync breakpoint registers */
149 /* Since this is likely called from init or reset, update target state information*/
150 return aarch64_poll(target
);
153 /* To reduce needless round-trips, pass in a pointer to the current
154 * DSCR value. Initialize it to zero if you just need to know the
155 * value on return from this function; or DSCR_INSTR_COMP if you
156 * happen to know that no instruction is pending.
/* Executes a single instruction on the halted core through the
 * Instruction Transfer Register (ITR): waits (with a 1 s timeout) for
 * DSCR.InstrCompl, writes the opcode to CPUDBG_ITR, then polls DSCR
 * again until the instruction completes.  dscr_p is an in/out cache of
 * DSCR to save round-trips, as the comment above explains.
 * NOTE(review): extraction dropped interior lines (declarations of
 * retval/dscr, braces, error returns, the dscr_p write-back);
 * surviving tokens are byte-for-byte. */
158 static int aarch64_exec_opcode(struct target
*target
,
159 uint32_t opcode
, uint32_t *dscr_p
)
163 struct armv8_common
*armv8
= target_to_armv8(target
);
164 dscr
= dscr_p
? *dscr_p
: 0;
166 LOG_DEBUG("exec opcode 0x%08" PRIx32
, opcode
);
168 /* Wait for InstrCompl bit to be set */
169 long long then
= timeval_ms();
170 while ((dscr
& DSCR_INSTR_COMP
) == 0) {
171 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
172 armv8
->debug_base
+ CPUDBG_DSCR
, &dscr
);
173 if (retval
!= ERROR_OK
) {
174 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32
, opcode
);
177 if (timeval_ms() > then
+ 1000) {
178 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
/* Issue the instruction by writing it into the ITR. */
183 retval
= mem_ap_write_u32(armv8
->debug_ap
,
184 armv8
->debug_base
+ CPUDBG_ITR
, opcode
);
185 if (retval
!= ERROR_OK
)
/* Second wait loop: poll DSCR until the issued instruction retires. */
190 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
191 armv8
->debug_base
+ CPUDBG_DSCR
, &dscr
);
192 if (retval
!= ERROR_OK
) {
193 LOG_ERROR("Could not read DSCR register");
196 if (timeval_ms() > then
+ 1000) {
197 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
200 } while ((dscr
& DSCR_INSTR_COMP
) == 0); /* Wait for InstrCompl bit to be set */
208 /* Write to memory mapped registers directly with no cache or mmu handling */
/* Thin convenience wrapper: one atomic 32-bit write through the debug AP
 * at an absolute address.  NOTE(review): extraction dropped the
 * address/value parameter list, braces and return; surviving tokens
 * are byte-for-byte. */
209 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
214 struct armv8_common
*armv8
= target_to_armv8(target
);
216 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
222 * AARCH64 implementation of Debug Programmer's Model
224 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
225 * so there's no need to poll for it before executing an instruction.
227 * NOTE that in several of these cases the "stall" mode might be useful.
228 * It'd let us queue a few operations together... prepare/finish might
229 * be the places to enable/disable that mode.
/* Recover the enclosing aarch64_common from a generic arm_dpm pointer.
 * The dpm is embedded at armv8_common.dpm inside aarch64_common, so
 * container_of() walks back to the outer struct.
 * NOTE(review): extraction dropped the function braces; surviving
 * tokens are byte-for-byte. */
232 static inline struct aarch64_common
*dpm_to_a8(struct arm_dpm
*dpm
)
234 return container_of(dpm
, struct aarch64_common
, armv8_common
.dpm
);
/* Stage a 32-bit value for the core by writing it to the DCC receive
 * register (DTRRX); the core-side instruction then reads it.
 * NOTE(review): extraction dropped the function braces; surviving
 * tokens are byte-for-byte. */
237 static int aarch64_write_dcc(struct aarch64_common
*a8
, uint32_t data
)
239 LOG_DEBUG("write DCC 0x%08" PRIx32
, data
);
240 return mem_ap_write_u32(a8
->armv8_common
.debug_ap
,
241 a8
->armv8_common
.debug_base
+ CPUDBG_DTRRX
, data
);
/* Stage a 64-bit value for the core as two 32-bit DCC writes: the low
 * word to DTRRX, the high word to DTRTX.  The two mem_ap results are
 * summed into `ret`, relying on ERROR_OK being 0 so any failure leaves
 * a nonzero code.  NOTE(review): extraction dropped braces, the `ret`
 * declaration and the final return; surviving tokens are byte-for-byte. */
244 static int aarch64_write_dcc_64(struct aarch64_common
*a8
, uint64_t data
)
247 LOG_DEBUG("write DCC 0x%08" PRIx32
, (unsigned)data
);
248 LOG_DEBUG("write DCC 0x%08" PRIx32
, (unsigned)(data
>> 32));
249 ret
= mem_ap_write_u32(a8
->armv8_common
.debug_ap
,
250 a8
->armv8_common
.debug_base
+ CPUDBG_DTRRX
, data
);
251 ret
+= mem_ap_write_u32(a8
->armv8_common
.debug_ap
,
252 a8
->armv8_common
.debug_base
+ CPUDBG_DTRTX
, data
>> 32);
/* Read one 32-bit word the core pushed into the DCC: poll DSCR (1 s
 * timeout) until DSCR_DTR_TX_FULL indicates data is pending, then read
 * it from DTRTX.  The trailing (cut-off) parameter is the usual dscr_p
 * in/out cache — TODO confirm, the parameter line was dropped.
 * NOTE(review): extraction dropped braces, retval declaration, the
 * &dscr / data arguments split across calls, and returns; surviving
 * tokens are byte-for-byte. */
256 static int aarch64_read_dcc(struct aarch64_common
*a8
, uint32_t *data
,
259 uint32_t dscr
= DSCR_INSTR_COMP
;
265 /* Wait for DTRRXfull */
266 long long then
= timeval_ms();
267 while ((dscr
& DSCR_DTR_TX_FULL
) == 0) {
268 retval
= mem_ap_read_atomic_u32(a8
->armv8_common
.debug_ap
,
269 a8
->armv8_common
.debug_base
+ CPUDBG_DSCR
,
271 if (retval
!= ERROR_OK
)
273 if (timeval_ms() > then
+ 1000) {
274 LOG_ERROR("Timeout waiting for read dcc");
/* Data is ready: fetch it from the transmit register. */
279 retval
= mem_ap_read_atomic_u32(a8
->armv8_common
.debug_ap
,
280 a8
->armv8_common
.debug_base
+ CPUDBG_DTRTX
,
282 if (retval
!= ERROR_OK
)
284 LOG_DEBUG("read DCC 0x%08" PRIx32
, *data
);
/* 64-bit variant of aarch64_read_dcc(): wait for DSCR_DTR_TX_FULL,
 * then read the low word from DTRTX and the high word from DTRRX and
 * combine them.  NOTE(review): extraction dropped braces, retval and
 * `higher` declarations, call arguments and returns; surviving tokens
 * are byte-for-byte.
 * NOTE(review): the combine at orig line 327 reads *data through a
 * uint32_t* — a strict-aliasing/endianness hazard worth revisiting in
 * the complete source (cannot be altered in a comment-only pass). */
291 static int aarch64_read_dcc_64(struct aarch64_common
*a8
, uint64_t *data
,
294 uint32_t dscr
= DSCR_INSTR_COMP
;
301 /* Wait for DTRRXfull */
302 long long then
= timeval_ms();
303 while ((dscr
& DSCR_DTR_TX_FULL
) == 0) {
304 retval
= mem_ap_read_atomic_u32(a8
->armv8_common
.debug_ap
,
305 a8
->armv8_common
.debug_base
+ CPUDBG_DSCR
,
307 if (retval
!= ERROR_OK
)
309 if (timeval_ms() > then
+ 1000) {
310 LOG_ERROR("Timeout waiting for read dcc");
/* Low word from DTRTX... */
315 retval
= mem_ap_read_atomic_u32(a8
->armv8_common
.debug_ap
,
316 a8
->armv8_common
.debug_base
+ CPUDBG_DTRTX
,
318 if (retval
!= ERROR_OK
)
/* ...high word from DTRRX. */
321 retval
= mem_ap_read_atomic_u32(a8
->armv8_common
.debug_ap
,
322 a8
->armv8_common
.debug_base
+ CPUDBG_DTRRX
,
324 if (retval
!= ERROR_OK
)
327 *data
= *(uint32_t *)data
| (uint64_t)higher
<< 32;
328 LOG_DEBUG("read DCC 0x%16.16" PRIx64
, *data
);
/* DPM "prepare" hook: establish the invariant that DSCR_INSTR_COMP is
 * set (no instruction pending) before any DPM operation, polling DSCR
 * with a 1 s timeout.  If stale data is sitting in the DCC RX register
 * ("should never happen"), it is drained by executing an instruction
 * via aarch64_exec_opcode() — the opcode argument was lost in
 * extraction.  NOTE(review): braces, retval/dscr declarations and
 * returns were dropped; surviving tokens are byte-for-byte. */
336 static int aarch64_dpm_prepare(struct arm_dpm
*dpm
)
338 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
342 /* set up invariant: INSTR_COMP is set after ever DPM operation */
343 long long then
= timeval_ms();
345 retval
= mem_ap_read_atomic_u32(a8
->armv8_common
.debug_ap
,
346 a8
->armv8_common
.debug_base
+ CPUDBG_DSCR
,
348 if (retval
!= ERROR_OK
)
350 if ((dscr
& DSCR_INSTR_COMP
) != 0)
352 if (timeval_ms() > then
+ 1000) {
353 LOG_ERROR("Timeout waiting for dpm prepare");
358 /* this "should never happen" ... */
359 if (dscr
& DSCR_DTR_RX_FULL
) {
360 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32
, dscr
);
362 retval
= aarch64_exec_opcode(
363 a8
->armv8_common
.arm
.target
,
366 if (retval
!= ERROR_OK
)
/* DPM "finish" hook — intentionally a no-op for this target; the
 * original author left the REVISIT marker below.
 * NOTE(review): braces and the return were dropped in extraction. */
373 static int aarch64_dpm_finish(struct arm_dpm
*dpm
)
375 /* REVISIT what could be done here? */
/* DPM instr_execute hook: run one opcode on the core with the local
 * DSCR cache primed to DSCR_ITE so aarch64_exec_opcode() skips the
 * initial completion wait.  The opcode parameter and the trailing
 * exec arguments were lost in extraction; surviving tokens are
 * byte-for-byte. */
379 static int aarch64_instr_execute(struct arm_dpm
*dpm
,
382 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
383 uint32_t dscr
= DSCR_ITE
;
385 return aarch64_exec_opcode(
386 a8
->armv8_common
.arm
.target
,
/* DPM hook: stage a 32-bit value in the DCC, then execute an opcode
 * that consumes it from the DCC on the core side.
 * NOTE(review): braces, the retval declaration, the early return and
 * the trailing exec_opcode arguments were dropped in extraction;
 * surviving tokens are byte-for-byte. */
391 static int aarch64_instr_write_data_dcc(struct arm_dpm
*dpm
,
392 uint32_t opcode
, uint32_t data
)
394 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
396 uint32_t dscr
= DSCR_INSTR_COMP
;
398 retval
= aarch64_write_dcc(a8
, data
);
399 if (retval
!= ERROR_OK
)
402 return aarch64_exec_opcode(
403 a8
->armv8_common
.arm
.target
,
/* 64-bit variant of aarch64_instr_write_data_dcc(): stage the value via
 * aarch64_write_dcc_64(), then execute the consuming opcode.
 * NOTE(review): braces, retval declaration, early return and trailing
 * exec arguments were dropped in extraction; surviving tokens are
 * byte-for-byte. */
408 static int aarch64_instr_write_data_dcc_64(struct arm_dpm
*dpm
,
409 uint32_t opcode
, uint64_t data
)
411 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
413 uint32_t dscr
= DSCR_INSTR_COMP
;
415 retval
= aarch64_write_dcc_64(a8
, data
);
416 if (retval
!= ERROR_OK
)
419 return aarch64_exec_opcode(
420 a8
->armv8_common
.arm
.target
,
/* DPM hook: route a 32-bit value to the core via R0 — stage it in the
 * DCC, execute an instruction that loads it into R0 (opcode argument
 * lost in extraction), then execute the caller's opcode which takes
 * its operand from R0.  NOTE(review): braces, retval declaration and
 * returns were dropped; surviving tokens are byte-for-byte. */
425 static int aarch64_instr_write_data_r0(struct arm_dpm
*dpm
,
426 uint32_t opcode
, uint32_t data
)
428 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
429 uint32_t dscr
= DSCR_INSTR_COMP
;
432 retval
= aarch64_write_dcc(a8
, data
);
433 if (retval
!= ERROR_OK
)
/* First instruction: pull the staged DCC value into R0. */
436 retval
= aarch64_exec_opcode(
437 a8
->armv8_common
.arm
.target
,
440 if (retval
!= ERROR_OK
)
443 /* then the opcode, taking data from R0 */
444 retval
= aarch64_exec_opcode(
445 a8
->armv8_common
.arm
.target
,
/* 64-bit variant of aarch64_instr_write_data_r0(): stage both halves in
 * the DCC, load them into X0, then execute the caller's opcode which
 * reads R0/X0.  NOTE(review): braces, retval declaration, opcode
 * arguments and returns were dropped in extraction; surviving tokens
 * are byte-for-byte. */
452 static int aarch64_instr_write_data_r0_64(struct arm_dpm
*dpm
,
453 uint32_t opcode
, uint64_t data
)
455 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
456 uint32_t dscr
= DSCR_INSTR_COMP
;
459 retval
= aarch64_write_dcc_64(a8
, data
);
460 if (retval
!= ERROR_OK
)
/* First instruction: pull the staged 64-bit DCC value into the register. */
463 retval
= aarch64_exec_opcode(
464 a8
->armv8_common
.arm
.target
,
467 if (retval
!= ERROR_OK
)
470 /* then the opcode, taking data from R0 */
471 retval
= aarch64_exec_opcode(
472 a8
->armv8_common
.arm
.target
,
/* DPM hook invoked after CPSR writes: executes an ISB-equivalent
 * "prefetch flush" (the AArch32 CP15 encoding MCR p15,0,c7,c5,4) so the
 * changed execution state takes effect before further instructions.
 * NOTE(review): braces and the trailing &dscr argument were dropped in
 * extraction; surviving tokens are byte-for-byte. */
479 static int aarch64_instr_cpsr_sync(struct arm_dpm
*dpm
)
481 struct target
*target
= dpm
->arm
->target
;
482 uint32_t dscr
= DSCR_INSTR_COMP
;
484 /* "Prefetch flush" after modifying execution status in CPSR */
485 return aarch64_exec_opcode(target
,
486 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
/* DPM hook: execute an opcode that pushes a 32-bit result into the DCC,
 * then collect it with aarch64_read_dcc().
 * NOTE(review): braces, retval declaration, exec arguments and the
 * early return were dropped in extraction; surviving tokens are
 * byte-for-byte. */
490 static int aarch64_instr_read_data_dcc(struct arm_dpm
*dpm
,
491 uint32_t opcode
, uint32_t *data
)
493 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
495 uint32_t dscr
= DSCR_INSTR_COMP
;
497 /* the opcode, writing data to DCC */
498 retval
= aarch64_exec_opcode(
499 a8
->armv8_common
.arm
.target
,
502 if (retval
!= ERROR_OK
)
505 return aarch64_read_dcc(a8
, data
, &dscr
);
/* 64-bit variant of aarch64_instr_read_data_dcc(): execute the opcode,
 * then collect both DCC words via aarch64_read_dcc_64().
 * NOTE(review): braces, retval declaration, exec arguments and the
 * early return were dropped in extraction; surviving tokens are
 * byte-for-byte. */
508 static int aarch64_instr_read_data_dcc_64(struct arm_dpm
*dpm
,
509 uint32_t opcode
, uint64_t *data
)
511 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
513 uint32_t dscr
= DSCR_INSTR_COMP
;
515 /* the opcode, writing data to DCC */
516 retval
= aarch64_exec_opcode(
517 a8
->armv8_common
.arm
.target
,
520 if (retval
!= ERROR_OK
)
523 return aarch64_read_dcc_64(a8
, data
, &dscr
);
/* DPM hook: execute an opcode whose 32-bit result lands in R0, move R0
 * into the DCC with the raw A64 instruction 0xd5130400
 * (msr dbgdtr_el0, x0), then collect it with aarch64_read_dcc().
 * NOTE(review): braces, retval declaration, exec arguments and returns
 * were dropped in extraction; surviving tokens are byte-for-byte. */
526 static int aarch64_instr_read_data_r0(struct arm_dpm
*dpm
,
527 uint32_t opcode
, uint32_t *data
)
529 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
530 uint32_t dscr
= DSCR_INSTR_COMP
;
533 /* the opcode, writing data to R0 */
534 retval
= aarch64_exec_opcode(
535 a8
->armv8_common
.arm
.target
,
538 if (retval
!= ERROR_OK
)
541 /* write R0 to DCC */
542 retval
= aarch64_exec_opcode(
543 a8
->armv8_common
.arm
.target
,
544 0xd5130400, /* msr dbgdtr_el0, x0 */
546 if (retval
!= ERROR_OK
)
549 return aarch64_read_dcc(a8
, data
, &dscr
);
/* 64-bit variant of aarch64_instr_read_data_r0(): result in X0 is
 * transferred to the DCC with msr dbgdtr_el0, x0 (0xd5130400) and
 * collected via aarch64_read_dcc_64().
 * NOTE(review): braces, retval declaration, exec arguments and returns
 * were dropped in extraction; surviving tokens are byte-for-byte. */
552 static int aarch64_instr_read_data_r0_64(struct arm_dpm
*dpm
,
553 uint32_t opcode
, uint64_t *data
)
555 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
556 uint32_t dscr
= DSCR_INSTR_COMP
;
559 /* the opcode, writing data to R0 */
560 retval
= aarch64_exec_opcode(
561 a8
->armv8_common
.arm
.target
,
564 if (retval
!= ERROR_OK
)
567 /* write R0 to DCC */
568 retval
= aarch64_exec_opcode(
569 a8
->armv8_common
.arm
.target
,
570 0xd5130400, /* msr dbgdtr_el0, x0 */
572 if (retval
!= ERROR_OK
)
575 return aarch64_read_dcc_64(a8
, data
, &dscr
);
/* DPM hook: program one breakpoint/watchpoint unit.  index_t 0-15
 * selects a breakpoint (BVR/BCR pair), 16-31 a watchpoint (WVR/WCR
 * pair); vr/cr start at debug_base and get the pair's base offset
 * added (the per-index stride and the switch/default lines were lost
 * in extraction).  addr goes to the value register, control to the
 * control register, both via aarch64_dap_write_memap_register_u32().
 * NOTE(review): braces, the switch head, per-index offsets, register
 * arguments and returns were dropped; surviving tokens are
 * byte-for-byte. */
578 static int aarch64_bpwp_enable(struct arm_dpm
*dpm
, unsigned index_t
,
579 uint32_t addr
, uint32_t control
)
581 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
582 uint32_t vr
= a8
->armv8_common
.debug_base
;
583 uint32_t cr
= a8
->armv8_common
.debug_base
;
587 case 0 ... 15: /* breakpoints */
588 vr
+= CPUDBG_BVR_BASE
;
589 cr
+= CPUDBG_BCR_BASE
;
591 case 16 ... 31: /* watchpoints */
592 vr
+= CPUDBG_WVR_BASE
;
593 cr
+= CPUDBG_WCR_BASE
;
602 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
603 (unsigned) vr
, (unsigned) cr
);
/* Value register first, then control register (which arms the unit). */
605 retval
= aarch64_dap_write_memap_register_u32(dpm
->arm
->target
,
607 if (retval
!= ERROR_OK
)
609 retval
= aarch64_dap_write_memap_register_u32(dpm
->arm
->target
,
/* DPM hook: disarm one breakpoint/watchpoint unit by zeroing its
 * control register (BCR for breakpoint indices, WCR for watchpoint
 * indices — the selecting switch and per-index offset arithmetic were
 * lost in extraction).  NOTE(review): braces, the cr declaration and
 * switch head were dropped; surviving tokens are byte-for-byte. */
614 static int aarch64_bpwp_disable(struct arm_dpm
*dpm
, unsigned index_t
)
619 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
624 cr
= a8
->armv8_common
.debug_base
+ CPUDBG_BCR_BASE
;
627 cr
= a8
->armv8_common
.debug_base
+ CPUDBG_WCR_BASE
;
635 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr
);
637 /* clear control register */
638 return aarch64_dap_write_memap_register_u32(dpm
->arm
->target
, cr
, 0);
/* Wire up the Debug Programmer's Model vtable for this target: every
 * instr_*/read/write/bpwp hook points at the aarch64_* implementations
 * above, then the generic armv8 DPM layer is set up and initialised.
 * Returns the result of armv8_dpm_setup()/armv8_dpm_initialize().
 * NOTE(review): braces, the didr/debug field assignment and the final
 * return were dropped in extraction; surviving tokens are
 * byte-for-byte. */
642 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint32_t debug
)
644 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
647 dpm
->arm
= &a8
->armv8_common
.arm
;
/* Transaction bracketing hooks. */
650 dpm
->prepare
= aarch64_dpm_prepare
;
651 dpm
->finish
= aarch64_dpm_finish
;
/* Core-side instruction execution / data transfer hooks. */
653 dpm
->instr_execute
= aarch64_instr_execute
;
654 dpm
->instr_write_data_dcc
= aarch64_instr_write_data_dcc
;
655 dpm
->instr_write_data_dcc_64
= aarch64_instr_write_data_dcc_64
;
656 dpm
->instr_write_data_r0
= aarch64_instr_write_data_r0
;
657 dpm
->instr_write_data_r0_64
= aarch64_instr_write_data_r0_64
;
658 dpm
->instr_cpsr_sync
= aarch64_instr_cpsr_sync
;
660 dpm
->instr_read_data_dcc
= aarch64_instr_read_data_dcc
;
661 dpm
->instr_read_data_dcc_64
= aarch64_instr_read_data_dcc_64
;
662 dpm
->instr_read_data_r0
= aarch64_instr_read_data_r0
;
663 dpm
->instr_read_data_r0_64
= aarch64_instr_read_data_r0_64
;
665 dpm
->arm_reg_current
= armv8_reg_current
;
/* Hardware breakpoint/watchpoint management hooks. */
667 dpm
->bpwp_enable
= aarch64_bpwp_enable
;
668 dpm
->bpwp_disable
= aarch64_bpwp_disable
;
670 retval
= armv8_dpm_setup(dpm
);
671 if (retval
== ERROR_OK
)
672 retval
= armv8_dpm_initialize(dpm
);
/* Walk the SMP target list looking for the halted core with the given
 * coreid; used when gdb toggles between cores.  Returns that target —
 * the not-found return path was lost in extraction.
 * NOTE(review): braces, head initialisation, the curr declaration and
 * returns were dropped; surviving tokens are byte-for-byte. */
676 static struct target
*get_aarch64(struct target
*target
, int32_t coreid
)
678 struct target_list
*head
;
682 while (head
!= (struct target_list
*)NULL
) {
684 if ((curr
->coreid
== coreid
) && (curr
->state
== TARGET_HALTED
))
690 static int aarch64_halt(struct target
*target
);
/* Halt every other core of the SMP group that is not already halted,
 * accumulating aarch64_halt() results into retval (declaration lost in
 * extraction; relies on ERROR_OK == 0 so any failure leaves nonzero).
 * NOTE(review): braces, head/curr setup, the list-advance and return
 * were dropped; surviving tokens are byte-for-byte. */
692 static int aarch64_halt_smp(struct target
*target
)
695 struct target_list
*head
;
698 while (head
!= (struct target_list
*)NULL
) {
700 if ((curr
!= target
) && (curr
->state
!= TARGET_HALTED
))
701 retval
+= aarch64_halt(curr
);
/* After a halt event: if gdb is attached but not yet bound to a core
 * (core[0] == -1), bind the gdb service to this target/coreid and halt
 * the rest of the SMP group.  NOTE(review): braces, the retval
 * declaration and the return were dropped in extraction; surviving
 * tokens are byte-for-byte. */
707 static int update_halt_gdb(struct target
*target
)
710 if (target
->gdb_service
&& target
->gdb_service
->core
[0] == -1) {
711 target
->gdb_service
->target
= target
;
712 target
->gdb_service
->core
[0] = target
->coreid
;
713 retval
+= aarch64_halt_smp(target
);
719 * Cortex-A8 Run control
722 static int aarch64_poll(struct target
*target
)
724 int retval
= ERROR_OK
;
726 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
727 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
728 enum target_state prev_target_state
= target
->state
;
729 /* toggle to another core is done by gdb as follow */
730 /* maint packet J core_id */
732 /* the next polling trigger an halt event sent to gdb */
733 if ((target
->state
== TARGET_HALTED
) && (target
->smp
) &&
734 (target
->gdb_service
) &&
735 (target
->gdb_service
->target
== NULL
)) {
736 target
->gdb_service
->target
=
737 get_aarch64(target
, target
->gdb_service
->core
[1]);
738 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
741 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
742 armv8
->debug_base
+ CPUDBG_DSCR
, &dscr
);
743 if (retval
!= ERROR_OK
)
745 aarch64
->cpudbg_dscr
= dscr
;
747 if (DSCR_RUN_MODE(dscr
) == (DSCR_CORE_HALTED
| DSCR_CORE_RESTARTED
)) {
748 if (prev_target_state
!= TARGET_HALTED
) {
749 /* We have a halting debug event */
750 LOG_DEBUG("Target halted");
751 target
->state
= TARGET_HALTED
;
752 if ((prev_target_state
== TARGET_RUNNING
)
753 || (prev_target_state
== TARGET_UNKNOWN
)
754 || (prev_target_state
== TARGET_RESET
)) {
755 retval
= aarch64_debug_entry(target
);
756 if (retval
!= ERROR_OK
)
759 retval
= update_halt_gdb(target
);
760 if (retval
!= ERROR_OK
)
763 target_call_event_callbacks(target
,
764 TARGET_EVENT_HALTED
);
766 if (prev_target_state
== TARGET_DEBUG_RUNNING
) {
769 retval
= aarch64_debug_entry(target
);
770 if (retval
!= ERROR_OK
)
773 retval
= update_halt_gdb(target
);
774 if (retval
!= ERROR_OK
)
778 target_call_event_callbacks(target
,
779 TARGET_EVENT_DEBUG_HALTED
);
782 } else if (DSCR_RUN_MODE(dscr
) == DSCR_CORE_RESTARTED
)
783 target
->state
= TARGET_RUNNING
;
785 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32
, dscr
);
786 target
->state
= TARGET_UNKNOWN
;
792 static int aarch64_halt(struct target
*target
)
794 int retval
= ERROR_OK
;
796 struct armv8_common
*armv8
= target_to_armv8(target
);
798 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
799 armv8
->debug_base
+ 0x10000 + 0, &dscr
);
800 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
801 armv8
->debug_base
+ 0x10000 + 0, 1);
802 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
803 armv8
->debug_base
+ 0x10000 + 0, &dscr
);
805 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
806 armv8
->debug_base
+ 0x10000 + 0x140, &dscr
);
807 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
808 armv8
->debug_base
+ 0x10000 + 0x140, 6);
809 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
810 armv8
->debug_base
+ 0x10000 + 0x140, &dscr
);
812 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
813 armv8
->debug_base
+ 0x10000 + 0xa0, &dscr
);
814 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
815 armv8
->debug_base
+ 0x10000 + 0xa0, 5);
816 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
817 armv8
->debug_base
+ 0x10000 + 0xa0, &dscr
);
819 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
820 armv8
->debug_base
+ 0x10000 + 0xa4, &dscr
);
821 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
822 armv8
->debug_base
+ 0x10000 + 0xa4, 2);
823 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
824 armv8
->debug_base
+ 0x10000 + 0xa4, &dscr
);
826 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
827 armv8
->debug_base
+ 0x10000 + 0x20, &dscr
);
828 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
829 armv8
->debug_base
+ 0x10000 + 0x20, 4);
830 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
831 armv8
->debug_base
+ 0x10000 + 0x20, &dscr
);
834 * enter halting debug mode
836 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
837 armv8
->debug_base
+ CPUDBG_DSCR
, &dscr
);
838 if (retval
!= ERROR_OK
)
842 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
843 armv8
->debug_base
+ 0x10000 + 0x134, &dscr
);
845 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
846 armv8
->debug_base
+ 0x10000 + 0x1c, &dscr
);
847 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
848 armv8
->debug_base
+ 0x10000 + 0x1c, 1);
849 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
850 armv8
->debug_base
+ 0x10000 + 0x1c, &dscr
);
853 long long then
= timeval_ms();
855 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
856 armv8
->debug_base
+ CPUDBG_DSCR
, &dscr
);
857 if (retval
!= ERROR_OK
)
859 if ((dscr
& DSCR_CORE_HALTED
) != 0)
861 if (timeval_ms() > then
+ 1000) {
862 LOG_ERROR("Timeout waiting for halt");
867 target
->debug_reason
= DBG_REASON_DBGRQ
;
872 static int aarch64_internal_restore(struct target
*target
, int current
,
873 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
875 struct armv8_common
*armv8
= target_to_armv8(target
);
876 struct arm
*arm
= &armv8
->arm
;
880 if (!debug_execution
)
881 target_free_all_working_areas(target
);
883 /* current = 1: continue on current pc, otherwise continue at <address> */
884 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
886 resume_pc
= *address
;
888 *address
= resume_pc
;
890 /* Make sure that the Armv7 gdb thumb fixups does not
891 * kill the return address
893 switch (arm
->core_state
) {
895 resume_pc
&= 0xFFFFFFFC;
897 case ARM_STATE_AARCH64
:
898 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
900 case ARM_STATE_THUMB
:
901 case ARM_STATE_THUMB_EE
:
902 /* When the return address is loaded into PC
903 * bit 0 must be 1 to stay in Thumb state
907 case ARM_STATE_JAZELLE
:
908 LOG_ERROR("How do I resume into Jazelle state??");
911 LOG_DEBUG("resume pc = 0x%16" PRIx64
, resume_pc
);
912 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
915 dpmv8_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
917 /* called it now before restoring context because it uses cpu
918 * register r0 for restoring system control register */
919 retval
= aarch64_restore_system_control_reg(target
);
920 if (retval
!= ERROR_OK
)
922 retval
= aarch64_restore_context(target
, handle_breakpoints
);
923 if (retval
!= ERROR_OK
)
925 target
->debug_reason
= DBG_REASON_NOTHALTED
;
926 target
->state
= TARGET_RUNNING
;
928 /* registers are now invalid */
929 register_cache_invalidate(arm
->core_cache
);
932 /* the front-end may request us not to handle breakpoints */
933 if (handle_breakpoints
) {
934 /* Single step past breakpoint at current address */
935 breakpoint
= breakpoint_find(target
, resume_pc
);
937 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint
->address
);
938 cortex_m3_unset_breakpoint(target
, breakpoint
);
939 cortex_m3_single_step_core(target
);
940 cortex_m3_set_breakpoint(target
, breakpoint
);
948 static int aarch64_internal_restart(struct target
*target
)
950 struct armv8_common
*armv8
= target_to_armv8(target
);
951 struct arm
*arm
= &armv8
->arm
;
955 * * Restart core and wait for it to be started. Clear ITRen and sticky
956 * * exception flags: see ARMv7 ARM, C5.9.
958 * REVISIT: for single stepping, we probably want to
959 * disable IRQs by default, with optional override...
962 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
963 armv8
->debug_base
+ CPUDBG_DSCR
, &dscr
);
964 if (retval
!= ERROR_OK
)
967 if ((dscr
& DSCR_INSTR_COMP
) == 0)
968 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
970 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
971 armv8
->debug_base
+ CPUDBG_DSCR
, dscr
& ~DSCR_ITR_EN
);
972 if (retval
!= ERROR_OK
)
975 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
976 armv8
->debug_base
+ CPUDBG_DRCR
, DRCR_RESTART
|
977 DRCR_CLEAR_EXCEPTIONS
);
978 if (retval
!= ERROR_OK
)
981 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
982 armv8
->debug_base
+ 0x10000 + 0x10, 1);
983 if (retval
!= ERROR_OK
)
986 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
987 armv8
->debug_base
+ 0x10000 + 0x1c, 2);
988 if (retval
!= ERROR_OK
)
991 long long then
= timeval_ms();
993 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
994 armv8
->debug_base
+ CPUDBG_DSCR
, &dscr
);
995 if (retval
!= ERROR_OK
)
997 if ((dscr
& DSCR_CORE_RESTARTED
) != 0)
999 if (timeval_ms() > then
+ 1000) {
1000 LOG_ERROR("Timeout waiting for resume");
1005 target
->debug_reason
= DBG_REASON_NOTHALTED
;
1006 target
->state
= TARGET_RUNNING
;
1008 /* registers are now invalid */
1009 register_cache_invalidate(arm
->core_cache
);
/* Resume every other core of the SMP group that is not already running:
 * restore each core's context at its current pc (current=1, no step)
 * and restart it, accumulating results into retval (declaration lost
 * in extraction).  NOTE(review): braces, the retval/address
 * declarations, the list-advance and return were dropped; surviving
 * tokens are byte-for-byte. */
1014 static int aarch64_restore_smp(struct target
*target
, int handle_breakpoints
)
1017 struct target_list
*head
;
1018 struct target
*curr
;
1020 head
= target
->head
;
1021 while (head
!= (struct target_list
*)NULL
) {
1022 curr
= head
->target
;
1023 if ((curr
!= target
) && (curr
->state
!= TARGET_RUNNING
)) {
1024 /* resume current address , not in step mode */
1025 retval
+= aarch64_internal_restore(curr
, 1, &address
,
1026 handle_breakpoints
, 0);
1027 retval
+= aarch64_internal_restart(curr
);
1035 static int aarch64_resume(struct target
*target
, int current
,
1036 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
1039 uint64_t addr
= address
;
1041 /* dummy resume for smp toggle in order to reduce gdb impact */
1042 if ((target
->smp
) && (target
->gdb_service
->core
[1] != -1)) {
1043 /* simulate a start and halt of target */
1044 target
->gdb_service
->target
= NULL
;
1045 target
->gdb_service
->core
[0] = target
->gdb_service
->core
[1];
1046 /* fake resume at next poll we play the target core[1], see poll*/
1047 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1050 aarch64_internal_restore(target
, current
, &addr
, handle_breakpoints
,
1053 target
->gdb_service
->core
[0] = -1;
1054 retval
= aarch64_restore_smp(target
, handle_breakpoints
);
1055 if (retval
!= ERROR_OK
)
1058 aarch64_internal_restart(target
);
1060 if (!debug_execution
) {
1061 target
->state
= TARGET_RUNNING
;
1062 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1063 LOG_DEBUG("target resumed at 0x%" PRIu64
, addr
);
1065 target
->state
= TARGET_DEBUG_RUNNING
;
1066 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
1067 LOG_DEBUG("target debug resumed at 0x%" PRIu64
, addr
);
1073 static int aarch64_debug_entry(struct target
*target
)
1076 int retval
= ERROR_OK
;
1077 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1078 struct armv8_common
*armv8
= target_to_armv8(target
);
1081 LOG_DEBUG("dscr = 0x%08" PRIx32
, aarch64
->cpudbg_dscr
);
1083 /* REVISIT surely we should not re-read DSCR !! */
1084 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1085 armv8
->debug_base
+ CPUDBG_DSCR
, &dscr
);
1086 if (retval
!= ERROR_OK
)
1089 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1090 * imprecise data aborts get discarded by issuing a Data
1091 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1094 /* Enable the ITR execution once we are in debug mode */
1095 dscr
|= DSCR_ITR_EN
;
1096 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1097 armv8
->debug_base
+ CPUDBG_DSCR
, dscr
);
1098 if (retval
!= ERROR_OK
)
1101 /* Examine debug reason */
1102 arm_dpm_report_dscr(&armv8
->dpm
, aarch64
->cpudbg_dscr
);
1103 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1104 armv8
->debug_base
+ CPUDBG_DESR
, &tmp
);
1105 if ((tmp
& 0x7) == 0x4)
1106 target
->debug_reason
= DBG_REASON_SINGLESTEP
;
1108 /* save address of instruction that triggered the watchpoint? */
1109 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
1112 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1113 armv8
->debug_base
+ CPUDBG_WFAR
,
1115 if (retval
!= ERROR_OK
)
1117 arm_dpm_report_wfar(&armv8
->dpm
, wfar
);
1120 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
1122 if (armv8
->post_debug_entry
) {
1123 retval
= armv8
->post_debug_entry(target
);
1124 if (retval
!= ERROR_OK
)
/* Runs after debug entry: clears a sticky condition via DRCR (bit 2),
 * reads SCTLR_EL1 with the raw A64 instruction 0xd5381000
 * (mrs x0, sctlr_el1), caches it in both system_control_reg and
 * system_control_reg_curr, records the current core mode, and derives
 * the MMU/cache-enable flags from SCTLR bits: bit 0 = MMU,
 * bit 2 = D/unified cache, bit 12 = I cache.  Identifies the cache
 * geometry once (ctype == -1 means "not probed yet").
 * NOTE(review): braces, the retval declaration and returns were
 * dropped in extraction; surviving tokens are byte-for-byte. */
1131 static int aarch64_post_debug_entry(struct target
*target
)
1133 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1134 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1135 struct armv8_mmu_common
*armv8_mmu
= &armv8
->armv8_mmu
;
1136 uint32_t sctlr_el1
= 0;
1139 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1140 armv8
->debug_base
+ CPUDBG_DRCR
, 1<<2);
1141 retval
= aarch64_instr_read_data_r0(armv8
->arm
.dpm
,
1142 0xd5381000, &sctlr_el1
);
1143 if (retval
!= ERROR_OK
)
1146 LOG_DEBUG("sctlr_el1 = %#8.8x", sctlr_el1
);
/* Cache the value: desired and current copies start out identical. */
1147 aarch64
->system_control_reg
= sctlr_el1
;
1148 aarch64
->system_control_reg_curr
= sctlr_el1
;
1149 aarch64
->curr_mode
= armv8
->arm
.core_mode
;
1151 armv8_mmu
->mmu_enabled
= sctlr_el1
& 0x1U
? 1 : 0;
1152 armv8_mmu
->armv8_cache
.d_u_cache_enabled
= sctlr_el1
& 0x4U
? 1 : 0;
1153 armv8_mmu
->armv8_cache
.i_cache_enabled
= sctlr_el1
& 0x1000U
? 1 : 0;
1156 if (armv8
->armv8_mmu
.armv8_cache
.ctype
== -1)
1157 armv8_identify_cache(target
);
1163 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
1164 int handle_breakpoints
)
1166 struct armv8_common
*armv8
= target_to_armv8(target
);
1170 if (target
->state
!= TARGET_HALTED
) {
1171 LOG_WARNING("target not halted");
1172 return ERROR_TARGET_NOT_HALTED
;
1175 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1176 armv8
->debug_base
+ CPUDBG_DECR
, &tmp
);
1177 if (retval
!= ERROR_OK
)
1180 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1181 armv8
->debug_base
+ CPUDBG_DECR
, (tmp
|0x4));
1182 if (retval
!= ERROR_OK
)
1185 target
->debug_reason
= DBG_REASON_SINGLESTEP
;
1186 retval
= aarch64_resume(target
, 1, address
, 0, 0);
1187 if (retval
!= ERROR_OK
)
1190 long long then
= timeval_ms();
1191 while (target
->state
!= TARGET_HALTED
) {
1192 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1193 armv8
->debug_base
+ CPUDBG_DESR
, &tmp
);
1194 LOG_DEBUG("DESR = %#x", tmp
);
1195 retval
= aarch64_poll(target
);
1196 if (retval
!= ERROR_OK
)
1198 if (timeval_ms() > then
+ 1000) {
1199 LOG_ERROR("timeout waiting for target halt");
1204 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1205 armv8
->debug_base
+ CPUDBG_DECR
, (tmp
&(~0x4)));
1206 if (retval
!= ERROR_OK
)
1209 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
1210 if (target
->state
== TARGET_HALTED
)
1211 LOG_DEBUG("target stepped");
/* Write all dirty (debugger-modified) registers back to the core before
 * resuming; gives the target a pre_restore_context hook first.  bpwp
 * asks the DPM layer to also (re)program breakpoint/watchpoint units.
 * NOTE(review): braces were dropped in extraction; surviving tokens
 * are byte-for-byte. */
1216 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
1218 struct armv8_common
*armv8
= target_to_armv8(target
);
1222 if (armv8
->pre_restore_context
)
1223 armv8
->pre_restore_context(target
);
1225 return armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
1230 * Cortex-A8 Breakpoint and watchpoint functions
1233 /* Setup hardware Breakpoint Register Pair */
1234 static int aarch64_set_breakpoint(struct target
*target
,
1235 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1240 uint8_t byte_addr_select
= 0x0F;
1241 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1242 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1243 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1246 if (breakpoint
->set
) {
1247 LOG_WARNING("breakpoint already set");
1251 if (breakpoint
->type
== BKPT_HARD
) {
1253 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
1255 if (brp_i
>= aarch64
->brp_num
) {
1256 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1257 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1259 breakpoint
->set
= brp_i
+ 1;
1260 if (breakpoint
->length
== 2)
1261 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1262 control
= ((matchmode
& 0x7) << 20)
1264 | (byte_addr_select
<< 5)
1266 brp_list
[brp_i
].used
= 1;
1267 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1268 brp_list
[brp_i
].control
= control
;
1269 bpt_value
= brp_list
[brp_i
].value
;
1271 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1272 + CPUDBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1273 (uint32_t)(bpt_value
& 0xFFFFFFFF));
1274 if (retval
!= ERROR_OK
)
1276 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1277 + CPUDBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1278 (uint32_t)(bpt_value
>> 32));
1279 if (retval
!= ERROR_OK
)
1282 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1283 + CPUDBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1284 brp_list
[brp_i
].control
);
1285 if (retval
!= ERROR_OK
)
1287 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1288 brp_list
[brp_i
].control
,
1289 brp_list
[brp_i
].value
);
1291 } else if (breakpoint
->type
== BKPT_SOFT
) {
1293 buf_set_u32(code
, 0, 32, 0xD4400000);
1295 retval
= target_read_memory(target
,
1296 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1297 breakpoint
->length
, 1,
1298 breakpoint
->orig_instr
);
1299 if (retval
!= ERROR_OK
)
1301 retval
= target_write_memory(target
,
1302 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1303 breakpoint
->length
, 1, code
);
1304 if (retval
!= ERROR_OK
)
1306 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1309 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1310 armv8
->debug_base
+ CPUDBG_DSCR
, &dscr
);
1311 /* Ensure that halting debug mode is enable */
1312 dscr
= dscr
| DSCR_HALT_DBG_MODE
;
1313 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1314 armv8
->debug_base
+ CPUDBG_DSCR
, dscr
);
1315 if (retval
!= ERROR_OK
) {
1316 LOG_DEBUG("Failed to set DSCR.HDE");
1323 static int aarch64_set_context_breakpoint(struct target
*target
,
1324 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1326 int retval
= ERROR_FAIL
;
1329 uint8_t byte_addr_select
= 0x0F;
1330 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1331 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1332 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1334 if (breakpoint
->set
) {
1335 LOG_WARNING("breakpoint already set");
1338 /*check available context BRPs*/
1339 while ((brp_list
[brp_i
].used
||
1340 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
1343 if (brp_i
>= aarch64
->brp_num
) {
1344 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1348 breakpoint
->set
= brp_i
+ 1;
1349 control
= ((matchmode
& 0x7) << 20)
1350 | (byte_addr_select
<< 5)
1352 brp_list
[brp_i
].used
= 1;
1353 brp_list
[brp_i
].value
= (breakpoint
->asid
);
1354 brp_list
[brp_i
].control
= control
;
1355 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1356 + CPUDBG_BVR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1357 brp_list
[brp_i
].value
);
1358 if (retval
!= ERROR_OK
)
1360 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1361 + CPUDBG_BCR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1362 brp_list
[brp_i
].control
);
1363 if (retval
!= ERROR_OK
)
1365 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1366 brp_list
[brp_i
].control
,
1367 brp_list
[brp_i
].value
);
1372 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1374 int retval
= ERROR_FAIL
;
1375 int brp_1
= 0; /* holds the contextID pair */
1376 int brp_2
= 0; /* holds the IVA pair */
1377 uint32_t control_CTX
, control_IVA
;
1378 uint8_t CTX_byte_addr_select
= 0x0F;
1379 uint8_t IVA_byte_addr_select
= 0x0F;
1380 uint8_t CTX_machmode
= 0x03;
1381 uint8_t IVA_machmode
= 0x01;
1382 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1383 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1384 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1386 if (breakpoint
->set
) {
1387 LOG_WARNING("breakpoint already set");
1390 /*check available context BRPs*/
1391 while ((brp_list
[brp_1
].used
||
1392 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
1395 printf("brp(CTX) found num: %d\n", brp_1
);
1396 if (brp_1
>= aarch64
->brp_num
) {
1397 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1401 while ((brp_list
[brp_2
].used
||
1402 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
1405 printf("brp(IVA) found num: %d\n", brp_2
);
1406 if (brp_2
>= aarch64
->brp_num
) {
1407 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1411 breakpoint
->set
= brp_1
+ 1;
1412 breakpoint
->linked_BRP
= brp_2
;
1413 control_CTX
= ((CTX_machmode
& 0x7) << 20)
1416 | (CTX_byte_addr_select
<< 5)
1418 brp_list
[brp_1
].used
= 1;
1419 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1420 brp_list
[brp_1
].control
= control_CTX
;
1421 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1422 + CPUDBG_BVR_BASE
+ 4 * brp_list
[brp_1
].BRPn
,
1423 brp_list
[brp_1
].value
);
1424 if (retval
!= ERROR_OK
)
1426 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1427 + CPUDBG_BCR_BASE
+ 4 * brp_list
[brp_1
].BRPn
,
1428 brp_list
[brp_1
].control
);
1429 if (retval
!= ERROR_OK
)
1432 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1434 | (IVA_byte_addr_select
<< 5)
1436 brp_list
[brp_2
].used
= 1;
1437 brp_list
[brp_2
].value
= (breakpoint
->address
& 0xFFFFFFFC);
1438 brp_list
[brp_2
].control
= control_IVA
;
1439 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1440 + CPUDBG_BVR_BASE
+ 4 * brp_list
[brp_2
].BRPn
,
1441 brp_list
[brp_2
].value
);
1442 if (retval
!= ERROR_OK
)
1444 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1445 + CPUDBG_BCR_BASE
+ 4 * brp_list
[brp_2
].BRPn
,
1446 brp_list
[brp_2
].control
);
1447 if (retval
!= ERROR_OK
)
1453 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1456 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1457 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1458 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1460 if (!breakpoint
->set
) {
1461 LOG_WARNING("breakpoint not set");
1465 if (breakpoint
->type
== BKPT_HARD
) {
1466 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1467 int brp_i
= breakpoint
->set
- 1;
1468 int brp_j
= breakpoint
->linked_BRP
;
1469 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1470 LOG_DEBUG("Invalid BRP number in breakpoint");
1473 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1474 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1475 brp_list
[brp_i
].used
= 0;
1476 brp_list
[brp_i
].value
= 0;
1477 brp_list
[brp_i
].control
= 0;
1478 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1479 + CPUDBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1480 brp_list
[brp_i
].control
);
1481 if (retval
!= ERROR_OK
)
1483 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1484 LOG_DEBUG("Invalid BRP number in breakpoint");
1487 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1488 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1489 brp_list
[brp_j
].used
= 0;
1490 brp_list
[brp_j
].value
= 0;
1491 brp_list
[brp_j
].control
= 0;
1492 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1493 + CPUDBG_BCR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1494 brp_list
[brp_j
].control
);
1495 if (retval
!= ERROR_OK
)
1497 breakpoint
->linked_BRP
= 0;
1498 breakpoint
->set
= 0;
1502 int brp_i
= breakpoint
->set
- 1;
1503 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1504 LOG_DEBUG("Invalid BRP number in breakpoint");
1507 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1508 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1509 brp_list
[brp_i
].used
= 0;
1510 brp_list
[brp_i
].value
= 0;
1511 brp_list
[brp_i
].control
= 0;
1512 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1513 + CPUDBG_BCR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1514 brp_list
[brp_i
].control
);
1515 if (retval
!= ERROR_OK
)
1517 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1518 + CPUDBG_BVR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1519 brp_list
[brp_i
].value
);
1520 if (retval
!= ERROR_OK
)
1522 breakpoint
->set
= 0;
1526 /* restore original instruction (kept in target endianness) */
1527 if (breakpoint
->length
== 4) {
1528 retval
= target_write_memory(target
,
1529 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1530 4, 1, breakpoint
->orig_instr
);
1531 if (retval
!= ERROR_OK
)
1534 retval
= target_write_memory(target
,
1535 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1536 2, 1, breakpoint
->orig_instr
);
1537 if (retval
!= ERROR_OK
)
1541 breakpoint
->set
= 0;
1546 static int aarch64_add_breakpoint(struct target
*target
,
1547 struct breakpoint
*breakpoint
)
1549 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1551 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1552 LOG_INFO("no hardware breakpoint available");
1553 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1556 if (breakpoint
->type
== BKPT_HARD
)
1557 aarch64
->brp_num_available
--;
1559 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1562 static int aarch64_add_context_breakpoint(struct target
*target
,
1563 struct breakpoint
*breakpoint
)
1565 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1567 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1568 LOG_INFO("no hardware breakpoint available");
1569 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1572 if (breakpoint
->type
== BKPT_HARD
)
1573 aarch64
->brp_num_available
--;
1575 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1578 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1579 struct breakpoint
*breakpoint
)
1581 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1583 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1584 LOG_INFO("no hardware breakpoint available");
1585 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1588 if (breakpoint
->type
== BKPT_HARD
)
1589 aarch64
->brp_num_available
--;
1591 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1595 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1597 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1600 /* It is perfectly possible to remove breakpoints while the target is running */
1601 if (target
->state
!= TARGET_HALTED
) {
1602 LOG_WARNING("target not halted");
1603 return ERROR_TARGET_NOT_HALTED
;
1607 if (breakpoint
->set
) {
1608 aarch64_unset_breakpoint(target
, breakpoint
);
1609 if (breakpoint
->type
== BKPT_HARD
)
1610 aarch64
->brp_num_available
++;
1617 * Cortex-A8 Reset functions
1620 static int aarch64_assert_reset(struct target
*target
)
1622 struct armv8_common
*armv8
= target_to_armv8(target
);
1626 /* FIXME when halt is requested, make it work somehow... */
1628 /* Issue some kind of warm reset. */
1629 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1630 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1631 else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1632 /* REVISIT handle "pulls" cases, if there's
1633 * hardware that needs them to work.
1635 jtag_add_reset(0, 1);
1637 LOG_ERROR("%s: how to reset?", target_name(target
));
1641 /* registers are now invalid */
1642 register_cache_invalidate(armv8
->arm
.core_cache
);
1644 target
->state
= TARGET_RESET
;
1649 static int aarch64_deassert_reset(struct target
*target
)
1655 /* be certain SRST is off */
1656 jtag_add_reset(0, 0);
1658 retval
= aarch64_poll(target
);
1659 if (retval
!= ERROR_OK
)
1662 if (target
->reset_halt
) {
1663 if (target
->state
!= TARGET_HALTED
) {
1664 LOG_WARNING("%s: ran after reset and before halt ...",
1665 target_name(target
));
1666 retval
= target_halt(target
);
1667 if (retval
!= ERROR_OK
)
1675 static int aarch64_write_apb_ab_memory(struct target
*target
,
1676 uint64_t address
, uint32_t size
,
1677 uint32_t count
, const uint8_t *buffer
)
1679 /* write memory through APB-AP */
1680 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1681 struct armv8_common
*armv8
= target_to_armv8(target
);
1682 struct arm
*arm
= &armv8
->arm
;
1683 int total_bytes
= count
* size
;
1685 int start_byte
= address
& 0x3;
1686 int end_byte
= (address
+ total_bytes
) & 0x3;
1689 uint8_t *tmp_buff
= NULL
;
1692 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64
" size %" PRIu32
" count%" PRIu32
,
1693 address
, size
, count
);
1694 if (target
->state
!= TARGET_HALTED
) {
1695 LOG_WARNING("target not halted");
1696 return ERROR_TARGET_NOT_HALTED
;
1699 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1701 /* Mark register R0 as dirty, as it will be used
1702 * for transferring the data.
1703 * It will be restored automatically when exiting
1706 reg
= armv8_reg_current(arm
, 1);
1709 reg
= armv8_reg_current(arm
, 0);
1712 /* clear any abort */
1713 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, armv8
->debug_base
+ CPUDBG_DRCR
, 1<<2);
1714 if (retval
!= ERROR_OK
)
1717 /* This algorithm comes from either :
1718 * Cortex-A8 TRM Example 12-25
1719 * Cortex-R4 TRM Example 11-26
1720 * (slight differences)
1723 /* The algorithm only copies 32 bit words, so the buffer
1724 * should be expanded to include the words at either end.
1725 * The first and last words will be read first to avoid
1726 * corruption if needed.
1728 tmp_buff
= malloc(total_u32
* 4);
1730 if ((start_byte
!= 0) && (total_u32
> 1)) {
1731 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1732 * the other bytes in the word.
1734 retval
= aarch64_read_apb_ab_memory(target
, (address
& ~0x3), 4, 1, tmp_buff
);
1735 if (retval
!= ERROR_OK
)
1736 goto error_free_buff_w
;
1739 /* If end of write is not aligned, or the write is less than 4 bytes */
1740 if ((end_byte
!= 0) ||
1741 ((total_u32
== 1) && (total_bytes
!= 4))) {
1743 /* Read the last word to avoid corruption during 32 bit write */
1744 int mem_offset
= (total_u32
-1) * 4;
1745 retval
= aarch64_read_apb_ab_memory(target
, (address
& ~0x3) + mem_offset
, 4, 1, &tmp_buff
[mem_offset
]);
1746 if (retval
!= ERROR_OK
)
1747 goto error_free_buff_w
;
1750 /* Copy the write buffer over the top of the temporary buffer */
1751 memcpy(&tmp_buff
[start_byte
], buffer
, total_bytes
);
1753 /* We now have a 32 bit aligned buffer that can be written */
1756 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1757 armv8
->debug_base
+ CPUDBG_DSCR
, &dscr
);
1758 if (retval
!= ERROR_OK
)
1759 goto error_free_buff_w
;
1761 /* Set DTR mode to Normal*/
1762 dscr
= (dscr
& ~DSCR_EXT_DCC_MASK
) | DSCR_EXT_DCC_NON_BLOCKING
;
1763 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1764 armv8
->debug_base
+ CPUDBG_DSCR
, dscr
);
1765 if (retval
!= ERROR_OK
)
1766 goto error_free_buff_w
;
1769 LOG_WARNING("reading size >4 bytes not yet supported");
1770 goto error_unset_dtr_w
;
1773 retval
= aarch64_instr_write_data_dcc_64(arm
->dpm
, 0xd5330401, address
+4);
1774 if (retval
!= ERROR_OK
)
1775 goto error_unset_dtr_w
;
1777 dscr
= DSCR_INSTR_COMP
;
1778 while (i
< count
* size
) {
1781 memcpy(&val
, &buffer
[i
], size
);
1782 retval
= aarch64_instr_write_data_dcc(arm
->dpm
, 0xd5330500, val
);
1783 if (retval
!= ERROR_OK
)
1784 goto error_unset_dtr_w
;
1786 retval
= aarch64_exec_opcode(target
, 0xb81fc020, &dscr
);
1787 if (retval
!= ERROR_OK
)
1788 goto error_unset_dtr_w
;
1790 retval
= aarch64_exec_opcode(target
, 0x91001021, &dscr
);
1791 if (retval
!= ERROR_OK
)
1792 goto error_unset_dtr_w
;
1797 /* Check for sticky abort flags in the DSCR */
1798 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1799 armv8
->debug_base
+ CPUDBG_DSCR
, &dscr
);
1800 if (retval
!= ERROR_OK
)
1801 goto error_free_buff_w
;
1802 if (dscr
& (DSCR_STICKY_ABORT_PRECISE
| DSCR_STICKY_ABORT_IMPRECISE
)) {
1803 /* Abort occurred - clear it and exit */
1804 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1805 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1806 armv8
->debug_base
+ CPUDBG_DRCR
, 1<<2);
1807 goto error_free_buff_w
;
1815 /* Unset DTR mode */
1816 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1817 armv8
->debug_base
+ CPUDBG_DSCR
, &dscr
);
1818 dscr
= (dscr
& ~DSCR_EXT_DCC_MASK
) | DSCR_EXT_DCC_NON_BLOCKING
;
1819 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1820 armv8
->debug_base
+ CPUDBG_DSCR
, dscr
);
1827 static int aarch64_read_apb_ab_memory(struct target
*target
,
1828 target_addr_t address
, uint32_t size
,
1829 uint32_t count
, uint8_t *buffer
)
1831 /* read memory through APB-AP */
1833 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1834 struct armv8_common
*armv8
= target_to_armv8(target
);
1835 struct arm
*arm
= &armv8
->arm
;
1838 uint8_t *tmp_buff
= NULL
;
1841 LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR
" size %" PRIu32
" count%" PRIu32
,
1842 address
, size
, count
);
1843 if (target
->state
!= TARGET_HALTED
) {
1844 LOG_WARNING("target not halted");
1845 return ERROR_TARGET_NOT_HALTED
;
1848 /* Mark register R0 as dirty, as it will be used
1849 * for transferring the data.
1850 * It will be restored automatically when exiting
1853 reg
= armv8_reg_current(arm
, 0);
1856 /* clear any abort */
1857 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1858 armv8
->debug_base
+ CPUDBG_DRCR
, 1<<2);
1859 if (retval
!= ERROR_OK
)
1860 goto error_free_buff_r
;
1862 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1863 armv8
->debug_base
+ CPUDBG_DSCR
, &dscr
);
1864 if (retval
!= ERROR_OK
)
1865 goto error_unset_dtr_r
;
1868 LOG_WARNING("reading size >4 bytes not yet supported");
1869 goto error_unset_dtr_r
;
1872 while (i
< count
* size
) {
1874 retval
= aarch64_instr_write_data_dcc_64(arm
->dpm
, 0xd5330400, address
+4);
1875 if (retval
!= ERROR_OK
)
1876 goto error_unset_dtr_r
;
1877 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1878 armv8
->debug_base
+ CPUDBG_DSCR
, &dscr
);
1880 dscr
= DSCR_INSTR_COMP
;
1881 retval
= aarch64_exec_opcode(target
, 0xb85fc000, &dscr
);
1882 if (retval
!= ERROR_OK
)
1883 goto error_unset_dtr_r
;
1884 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1885 armv8
->debug_base
+ CPUDBG_DSCR
, &dscr
);
1887 retval
= aarch64_instr_read_data_dcc(arm
->dpm
, 0xd5130400, &val
);
1888 if (retval
!= ERROR_OK
)
1889 goto error_unset_dtr_r
;
1890 memcpy(&buffer
[i
], &val
, size
);
1895 /* Clear any sticky error */
1896 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1897 armv8
->debug_base
+ CPUDBG_DRCR
, 1<<2);
1903 LOG_WARNING("DSCR = 0x%" PRIx32
, dscr
);
1904 /* Todo: Unset DTR mode */
1910 /* Clear any sticky error */
1911 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1912 armv8
->debug_base
+ CPUDBG_DRCR
, 1<<2);
1917 static int aarch64_read_phys_memory(struct target
*target
,
1918 target_addr_t address
, uint32_t size
,
1919 uint32_t count
, uint8_t *buffer
)
1921 struct armv8_common
*armv8
= target_to_armv8(target
);
1922 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1923 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
1924 uint8_t apsel
= swjdp
->apsel
;
1925 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
,
1926 address
, size
, count
);
1928 if (count
&& buffer
) {
1930 if (armv8
->memory_ap_available
&& (apsel
== armv8
->memory_ap
->ap_num
)) {
1932 /* read memory through AHB-AP */
1933 retval
= mem_ap_read_buf(armv8
->memory_ap
, buffer
, size
, count
, address
);
1935 /* read memory through APB-AP */
1936 retval
= aarch64_mmu_modify(target
, 0);
1937 if (retval
!= ERROR_OK
)
1939 retval
= aarch64_read_apb_ab_memory(target
, address
, size
, count
, buffer
);
1945 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
1946 uint32_t size
, uint32_t count
, uint8_t *buffer
)
1948 int mmu_enabled
= 0;
1949 target_addr_t virt
, phys
;
1951 struct armv8_common
*armv8
= target_to_armv8(target
);
1952 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
1953 uint8_t apsel
= swjdp
->apsel
;
1955 /* aarch64 handles unaligned memory access */
1956 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
, address
,
1959 /* determine if MMU was enabled on target stop */
1960 if (!armv8
->is_armv7r
) {
1961 retval
= aarch64_mmu(target
, &mmu_enabled
);
1962 if (retval
!= ERROR_OK
)
1966 if (armv8
->memory_ap_available
&& (apsel
== armv8
->memory_ap
->ap_num
)) {
1969 retval
= aarch64_virt2phys(target
, virt
, &phys
);
1970 if (retval
!= ERROR_OK
)
1973 LOG_DEBUG("Reading at virtual address. Translating v:0x%" TARGET_PRIxADDR
" to r:0x%" TARGET_PRIxADDR
,
1977 retval
= aarch64_read_phys_memory(target
, address
, size
, count
,
1981 retval
= aarch64_check_address(target
, address
);
1982 if (retval
!= ERROR_OK
)
1984 /* enable MMU as we could have disabled it for phys
1986 retval
= aarch64_mmu_modify(target
, 1);
1987 if (retval
!= ERROR_OK
)
1990 retval
= aarch64_read_apb_ab_memory(target
, address
, size
,
1996 static int aarch64_write_phys_memory(struct target
*target
,
1997 target_addr_t address
, uint32_t size
,
1998 uint32_t count
, const uint8_t *buffer
)
2000 struct armv8_common
*armv8
= target_to_armv8(target
);
2001 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2002 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2003 uint8_t apsel
= swjdp
->apsel
;
2005 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
, address
,
2008 if (count
&& buffer
) {
2010 if (armv8
->memory_ap_available
&& (apsel
== armv8
->memory_ap
->ap_num
)) {
2012 /* write memory through AHB-AP */
2013 retval
= mem_ap_write_buf(armv8
->memory_ap
, buffer
, size
, count
, address
);
2016 /* write memory through APB-AP */
2017 if (!armv8
->is_armv7r
) {
2018 retval
= aarch64_mmu_modify(target
, 0);
2019 if (retval
!= ERROR_OK
)
2022 return aarch64_write_apb_ab_memory(target
, address
, size
, count
, buffer
);
2027 /* REVISIT this op is generic ARMv7-A/R stuff */
2028 if (retval
== ERROR_OK
&& target
->state
== TARGET_HALTED
) {
2029 struct arm_dpm
*dpm
= armv8
->arm
.dpm
;
2031 retval
= dpm
->prepare(dpm
);
2032 if (retval
!= ERROR_OK
)
2035 /* The Cache handling will NOT work with MMU active, the
2036 * wrong addresses will be invalidated!
2038 * For both ICache and DCache, walk all cache lines in the
2039 * address range. Cortex-A8 has fixed 64 byte line length.
2041 * REVISIT per ARMv7, these may trigger watchpoints ...
2044 /* invalidate I-Cache */
2045 if (armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
) {
2046 /* ICIMVAU - Invalidate Cache single entry
2048 * MCR p15, 0, r0, c7, c5, 1
2050 for (uint32_t cacheline
= address
;
2051 cacheline
< address
+ size
* count
;
2053 retval
= dpm
->instr_write_data_r0(dpm
,
2054 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2056 if (retval
!= ERROR_OK
)
2061 /* invalidate D-Cache */
2062 if (armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
) {
2063 /* DCIMVAC - Invalidate data Cache line
2065 * MCR p15, 0, r0, c7, c6, 1
2067 for (uint32_t cacheline
= address
;
2068 cacheline
< address
+ size
* count
;
2070 retval
= dpm
->instr_write_data_r0(dpm
,
2071 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2073 if (retval
!= ERROR_OK
)
2078 /* (void) */ dpm
->finish(dpm
);
2084 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
2085 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
2087 int mmu_enabled
= 0;
2088 target_addr_t virt
, phys
;
2090 struct armv8_common
*armv8
= target_to_armv8(target
);
2091 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2092 uint8_t apsel
= swjdp
->apsel
;
2094 /* aarch64 handles unaligned memory access */
2095 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR
"; size %" PRId32
2096 "; count %" PRId32
, address
, size
, count
);
2098 /* determine if MMU was enabled on target stop */
2099 if (!armv8
->is_armv7r
) {
2100 retval
= aarch64_mmu(target
, &mmu_enabled
);
2101 if (retval
!= ERROR_OK
)
2105 if (armv8
->memory_ap_available
&& (apsel
== armv8
->memory_ap
->ap_num
)) {
2106 LOG_DEBUG("Writing memory to address 0x%" TARGET_PRIxADDR
"; size %"
2107 PRId32
"; count %" PRId32
, address
, size
, count
);
2110 retval
= aarch64_virt2phys(target
, virt
, &phys
);
2111 if (retval
!= ERROR_OK
)
2114 LOG_DEBUG("Writing to virtual address. Translating v:0x%"
2115 TARGET_PRIxADDR
" to r:0x%" TARGET_PRIxADDR
, virt
, phys
);
2118 retval
= aarch64_write_phys_memory(target
, address
, size
,
2122 retval
= aarch64_check_address(target
, address
);
2123 if (retval
!= ERROR_OK
)
2125 /* enable MMU as we could have disabled it for phys access */
2126 retval
= aarch64_mmu_modify(target
, 1);
2127 if (retval
!= ERROR_OK
)
2130 retval
= aarch64_write_apb_ab_memory(target
, address
, size
, count
, buffer
);
2135 static int aarch64_handle_target_request(void *priv
)
2137 struct target
*target
= priv
;
2138 struct armv8_common
*armv8
= target_to_armv8(target
);
2141 if (!target_was_examined(target
))
2143 if (!target
->dbg_msg_enabled
)
2146 if (target
->state
== TARGET_RUNNING
) {
2149 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2150 armv8
->debug_base
+ CPUDBG_DSCR
, &dscr
);
2152 /* check if we have data */
2153 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
2154 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2155 armv8
->debug_base
+ CPUDBG_DTRTX
, &request
);
2156 if (retval
== ERROR_OK
) {
2157 target_request(target
, request
);
2158 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2159 armv8
->debug_base
+ CPUDBG_DSCR
, &dscr
);
2167 static int aarch64_examine_first(struct target
*target
)
2169 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2170 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2171 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2172 int retval
= ERROR_OK
;
2173 uint32_t pfr
, debug
, ctypr
, ttypr
, cpuid
;
2176 /* We do one extra read to ensure DAP is configured,
2177 * we call ahbap_debugport_init(swjdp) instead
2179 retval
= dap_dp_init(swjdp
);
2180 if (retval
!= ERROR_OK
)
2183 /* Search for the APB-AB - it is needed for access to debug registers */
2184 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
2185 if (retval
!= ERROR_OK
) {
2186 LOG_ERROR("Could not find APB-AP for debug access");
2190 retval
= mem_ap_init(armv8
->debug_ap
);
2191 if (retval
!= ERROR_OK
) {
2192 LOG_ERROR("Could not initialize the APB-AP");
2196 armv8
->debug_ap
->memaccess_tck
= 80;
2198 /* Search for the AHB-AB */
2199 armv8
->memory_ap_available
= false;
2200 retval
= dap_find_ap(swjdp
, AP_TYPE_AHB_AP
, &armv8
->memory_ap
);
2201 if (retval
== ERROR_OK
) {
2202 retval
= mem_ap_init(armv8
->memory_ap
);
2203 if (retval
== ERROR_OK
)
2204 armv8
->memory_ap_available
= true;
2206 if (retval
!= ERROR_OK
) {
2207 /* AHB-AP not found or unavailable - use the CPU */
2208 LOG_DEBUG("No AHB-AP available for memory access");
2212 if (!target
->dbgbase_set
) {
2214 /* Get ROM Table base */
2216 int32_t coreidx
= target
->coreid
;
2217 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
2218 if (retval
!= ERROR_OK
)
2220 /* Lookup 0x15 -- Processor DAP */
2221 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
2222 &armv8
->debug_base
, &coreidx
);
2223 if (retval
!= ERROR_OK
)
2225 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
,
2226 coreidx
, armv8
->debug_base
);
2228 armv8
->debug_base
= target
->dbgbase
;
2230 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2231 armv8
->debug_base
+ 0x300, 0);
2232 if (retval
!= ERROR_OK
) {
2233 LOG_DEBUG("Examine %s failed", "oslock");
2237 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2238 armv8
->debug_base
+ 0x88, &cpuid
);
2239 LOG_DEBUG("0x88 = %x", cpuid
);
2241 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2242 armv8
->debug_base
+ 0x314, &cpuid
);
2243 LOG_DEBUG("0x314 = %x", cpuid
);
2245 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2246 armv8
->debug_base
+ 0x310, &cpuid
);
2247 LOG_DEBUG("0x310 = %x", cpuid
);
2248 if (retval
!= ERROR_OK
)
2251 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2252 armv8
->debug_base
+ CPUDBG_CPUID
, &cpuid
);
2253 if (retval
!= ERROR_OK
) {
2254 LOG_DEBUG("Examine %s failed", "CPUID");
2258 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2259 armv8
->debug_base
+ CPUDBG_CTYPR
, &ctypr
);
2260 if (retval
!= ERROR_OK
) {
2261 LOG_DEBUG("Examine %s failed", "CTYPR");
2265 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2266 armv8
->debug_base
+ CPUDBG_TTYPR
, &ttypr
);
2267 if (retval
!= ERROR_OK
) {
2268 LOG_DEBUG("Examine %s failed", "TTYPR");
2272 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2273 armv8
->debug_base
+ ID_AA64PFR0_EL1
, &pfr
);
2274 if (retval
!= ERROR_OK
) {
2275 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2278 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2279 armv8
->debug_base
+ ID_AA64DFR0_EL1
, &debug
);
2280 if (retval
!= ERROR_OK
) {
2281 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2285 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
2286 LOG_DEBUG("ctypr = 0x%08" PRIx32
, ctypr
);
2287 LOG_DEBUG("ttypr = 0x%08" PRIx32
, ttypr
);
2288 LOG_DEBUG("ID_AA64PFR0_EL1 = 0x%08" PRIx32
, pfr
);
2289 LOG_DEBUG("ID_AA64DFR0_EL1 = 0x%08" PRIx32
, debug
);
2291 armv8
->arm
.core_type
= ARM_MODE_MON
;
2292 armv8
->arm
.core_state
= ARM_STATE_AARCH64
;
2293 retval
= aarch64_dpm_setup(aarch64
, debug
);
2294 if (retval
!= ERROR_OK
)
2297 /* Setup Breakpoint Register Pairs */
2298 aarch64
->brp_num
= ((debug
>> 12) & 0x0F) + 1;
2299 aarch64
->brp_num_context
= ((debug
>> 28) & 0x0F) + 1;
2301 /* hack - no context bpt support yet */
2302 aarch64
->brp_num_context
= 0;
2304 aarch64
->brp_num_available
= aarch64
->brp_num
;
2305 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
2306 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
2307 aarch64
->brp_list
[i
].used
= 0;
2308 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
2309 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
2311 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
2312 aarch64
->brp_list
[i
].value
= 0;
2313 aarch64
->brp_list
[i
].control
= 0;
2314 aarch64
->brp_list
[i
].BRPn
= i
;
2317 LOG_DEBUG("Configured %i hw breakpoints", aarch64
->brp_num
);
2319 target_set_examined(target
);
2323 static int aarch64_examine(struct target
*target
)
2325 int retval
= ERROR_OK
;
2327 /* don't re-probe hardware after each reset */
2328 if (!target_was_examined(target
))
2329 retval
= aarch64_examine_first(target
);
2331 /* Configure core debug access */
2332 if (retval
== ERROR_OK
)
2333 retval
= aarch64_init_debug_access(target
);
2339 * Cortex-A8 target creation and initialization
2342 static int aarch64_init_target(struct command_context
*cmd_ctx
,
2343 struct target
*target
)
2345 /* examine_first() does a bunch of this */
2349 static int aarch64_init_arch_info(struct target
*target
,
2350 struct aarch64_common
*aarch64
, struct jtag_tap
*tap
)
2352 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2353 struct adiv5_dap
*dap
= armv8
->arm
.dap
;
2355 armv8
->arm
.dap
= dap
;
2357 /* Setup struct aarch64_common */
2358 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
2359 /* tap has no dap initialized */
2361 tap
->dap
= dap_init();
2363 /* Leave (only) generic DAP stuff for debugport_init() */
2364 tap
->dap
->tap
= tap
;
2367 armv8
->arm
.dap
= tap
->dap
;
2369 aarch64
->fast_reg_read
= 0;
2371 /* register arch-specific functions */
2372 armv8
->examine_debug_reason
= NULL
;
2374 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
2376 armv8
->pre_restore_context
= NULL
;
2378 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
2380 /* REVISIT v7a setup should be in a v7a-specific routine */
2381 armv8_init_arch_info(target
, armv8
);
2382 target_register_timer_callback(aarch64_handle_target_request
, 1, 1, target
);
2387 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
2389 struct aarch64_common
*aarch64
= calloc(1, sizeof(struct aarch64_common
));
2391 aarch64
->armv8_common
.is_armv7r
= false;
2393 return aarch64_init_arch_info(target
, aarch64
, target
->tap
);
2396 static int aarch64_mmu(struct target
*target
, int *enabled
)
2398 if (target
->state
!= TARGET_HALTED
) {
2399 LOG_ERROR("%s: target not halted", __func__
);
2400 return ERROR_TARGET_INVALID
;
2403 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
2407 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
2408 target_addr_t
*phys
)
2410 int retval
= ERROR_FAIL
;
2411 struct armv8_common
*armv8
= target_to_armv8(target
);
2412 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2413 uint8_t apsel
= swjdp
->apsel
;
2414 if (armv8
->memory_ap_available
&& (apsel
== armv8
->memory_ap
->ap_num
)) {
2416 retval
= armv8_mmu_translate_va(target
,
2418 if (retval
!= ERROR_OK
)
2421 } else {/* use this method if armv8->memory_ap not selected
2422 * mmu must be enable in order to get a correct translation */
2423 retval
= aarch64_mmu_modify(target
, 1);
2424 if (retval
!= ERROR_OK
)
2426 retval
= armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
2432 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
2434 struct target
*target
= get_current_target(CMD_CTX
);
2435 struct armv8_common
*armv8
= target_to_armv8(target
);
2437 return armv8_handle_cache_info_command(CMD_CTX
,
2438 &armv8
->armv8_mmu
.armv8_cache
);
2442 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
2444 struct target
*target
= get_current_target(CMD_CTX
);
2445 if (!target_was_examined(target
)) {
2446 LOG_ERROR("target not examined yet");
2450 return aarch64_init_debug_access(target
);
2452 COMMAND_HANDLER(aarch64_handle_smp_off_command
)
2454 struct target
*target
= get_current_target(CMD_CTX
);
2455 /* check target is an smp target */
2456 struct target_list
*head
;
2457 struct target
*curr
;
2458 head
= target
->head
;
2460 if (head
!= (struct target_list
*)NULL
) {
2461 while (head
!= (struct target_list
*)NULL
) {
2462 curr
= head
->target
;
2466 /* fixes the target display to the debugger */
2467 target
->gdb_service
->target
= target
;
2472 COMMAND_HANDLER(aarch64_handle_smp_on_command
)
2474 struct target
*target
= get_current_target(CMD_CTX
);
2475 struct target_list
*head
;
2476 struct target
*curr
;
2477 head
= target
->head
;
2478 if (head
!= (struct target_list
*)NULL
) {
2480 while (head
!= (struct target_list
*)NULL
) {
2481 curr
= head
->target
;
2489 COMMAND_HANDLER(aarch64_handle_smp_gdb_command
)
2491 struct target
*target
= get_current_target(CMD_CTX
);
2492 int retval
= ERROR_OK
;
2493 struct target_list
*head
;
2494 head
= target
->head
;
2495 if (head
!= (struct target_list
*)NULL
) {
2496 if (CMD_ARGC
== 1) {
2498 COMMAND_PARSE_NUMBER(int, CMD_ARGV
[0], coreid
);
2499 if (ERROR_OK
!= retval
)
2501 target
->gdb_service
->core
[1] = coreid
;
2504 command_print(CMD_CTX
, "gdb coreid %" PRId32
" -> %" PRId32
, target
->gdb_service
->core
[0]
2505 , target
->gdb_service
->core
[1]);
2510 static const struct command_registration aarch64_exec_command_handlers
[] = {
2512 .name
= "cache_info",
2513 .handler
= aarch64_handle_cache_info_command
,
2514 .mode
= COMMAND_EXEC
,
2515 .help
= "display information about target caches",
2520 .handler
= aarch64_handle_dbginit_command
,
2521 .mode
= COMMAND_EXEC
,
2522 .help
= "Initialize core debug",
2525 { .name
= "smp_off",
2526 .handler
= aarch64_handle_smp_off_command
,
2527 .mode
= COMMAND_EXEC
,
2528 .help
= "Stop smp handling",
2533 .handler
= aarch64_handle_smp_on_command
,
2534 .mode
= COMMAND_EXEC
,
2535 .help
= "Restart smp handling",
2540 .handler
= aarch64_handle_smp_gdb_command
,
2541 .mode
= COMMAND_EXEC
,
2542 .help
= "display/fix current core played to gdb",
2547 COMMAND_REGISTRATION_DONE
2549 static const struct command_registration aarch64_command_handlers
[] = {
2551 .chain
= arm_command_handlers
,
2554 .chain
= armv8_command_handlers
,
2558 .mode
= COMMAND_ANY
,
2559 .help
= "Cortex-A command group",
2561 .chain
= aarch64_exec_command_handlers
,
2563 COMMAND_REGISTRATION_DONE
2566 struct target_type aarch64_target
= {
2569 .poll
= aarch64_poll
,
2570 .arch_state
= armv8_arch_state
,
2572 .halt
= aarch64_halt
,
2573 .resume
= aarch64_resume
,
2574 .step
= aarch64_step
,
2576 .assert_reset
= aarch64_assert_reset
,
2577 .deassert_reset
= aarch64_deassert_reset
,
2579 /* REVISIT allow exporting VFP3 registers ... */
2580 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
2582 .read_memory
= aarch64_read_memory
,
2583 .write_memory
= aarch64_write_memory
,
2585 .checksum_memory
= arm_checksum_memory
,
2586 .blank_check_memory
= arm_blank_check_memory
,
2588 .run_algorithm
= armv4_5_run_algorithm
,
2590 .add_breakpoint
= aarch64_add_breakpoint
,
2591 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
2592 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
2593 .remove_breakpoint
= aarch64_remove_breakpoint
,
2594 .add_watchpoint
= NULL
,
2595 .remove_watchpoint
= NULL
,
2597 .commands
= aarch64_command_handlers
,
2598 .target_create
= aarch64_target_create
,
2599 .init_target
= aarch64_init_target
,
2600 .examine
= aarch64_examine
,
2602 .read_phys_memory
= aarch64_read_phys_memory
,
2603 .write_phys_memory
= aarch64_write_phys_memory
,
2605 .virt2phys
= aarch64_virt2phys
,
Linking to existing account procedure
If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)