aarch64: fix entry into debug state
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include <helper/time_support.h>
31
32 static int aarch64_poll(struct target *target);
33 static int aarch64_debug_entry(struct target *target);
34 static int aarch64_restore_context(struct target *target, bool bpwp);
35 static int aarch64_set_breakpoint(struct target *target,
36 struct breakpoint *breakpoint, uint8_t matchmode);
37 static int aarch64_set_context_breakpoint(struct target *target,
38 struct breakpoint *breakpoint, uint8_t matchmode);
39 static int aarch64_set_hybrid_breakpoint(struct target *target,
40 struct breakpoint *breakpoint);
41 static int aarch64_unset_breakpoint(struct target *target,
42 struct breakpoint *breakpoint);
43 static int aarch64_mmu(struct target *target, int *enabled);
44 static int aarch64_virt2phys(struct target *target,
45 target_addr_t virt, target_addr_t *phys);
46 static int aarch64_read_apb_ap_memory(struct target *target,
47 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer);
48 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
49 uint32_t opcode, uint32_t data);
50
51 static int aarch64_restore_system_control_reg(struct target *target)
52 {
53 int retval = ERROR_OK;
54
55 struct aarch64_common *aarch64 = target_to_aarch64(target);
56 struct armv8_common *armv8 = target_to_armv8(target);
57
58 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
59 aarch64->system_control_reg_curr = aarch64->system_control_reg;
60 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
61
62 switch (armv8->arm.core_mode) {
63 case ARMV8_64_EL0T:
64 case ARMV8_64_EL1T:
65 case ARMV8_64_EL1H:
66 retval = armv8->arm.msr(target, 3, /*op 0*/
67 0, 1, /* op1, op2 */
68 0, 0, /* CRn, CRm */
69 aarch64->system_control_reg);
70 if (retval != ERROR_OK)
71 return retval;
72 break;
73 case ARMV8_64_EL2T:
74 case ARMV8_64_EL2H:
75 retval = armv8->arm.msr(target, 3, /*op 0*/
76 4, 1, /* op1, op2 */
77 0, 0, /* CRn, CRm */
78 aarch64->system_control_reg);
79 if (retval != ERROR_OK)
80 return retval;
81 break;
82 case ARMV8_64_EL3H:
83 case ARMV8_64_EL3T:
84 retval = armv8->arm.msr(target, 3, /*op 0*/
85 6, 1, /* op1, op2 */
86 0, 0, /* CRn, CRm */
87 aarch64->system_control_reg);
88 if (retval != ERROR_OK)
89 return retval;
90 break;
91 default:
92 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
93 }
94 }
95 return retval;
96 }
97
98 /* check the address before an aarch64_apb read/write access with the
99 * mmu on, to avoid a predictable apb data abort */
100 static int aarch64_check_address(struct target *target, uint32_t address)
101 {
102 /* TODO */
103 return ERROR_OK;
104 }
105 /* modify system_control_reg in order to enable or disable the mmu for:
106 * - virt2phys address conversion
107 * - read or write memory in phys or virt address */
108 static int aarch64_mmu_modify(struct target *target, int enable)
109 {
110 struct aarch64_common *aarch64 = target_to_aarch64(target);
111 struct armv8_common *armv8 = &aarch64->armv8_common;
112 int retval = ERROR_OK;
113
114 if (enable) {
115 /* the mmu can only be enabled here if it was on when the target stopped */
116 if (!(aarch64->system_control_reg & 0x1U)) {
117 LOG_ERROR("trying to enable the mmu on a target stopped with the mmu disabled");
118 return ERROR_FAIL;
119 }
120 if (!(aarch64->system_control_reg_curr & 0x1U)) {
121 aarch64->system_control_reg_curr |= 0x1U;
122 switch (armv8->arm.core_mode) {
123 case ARMV8_64_EL0T:
124 case ARMV8_64_EL1T:
125 case ARMV8_64_EL1H:
126 retval = armv8->arm.msr(target, 3, /*op 0*/
127 0, 0, /* op1, op2 */
128 1, 0, /* CRn, CRm */
129 aarch64->system_control_reg_curr);
130 if (retval != ERROR_OK)
131 return retval;
132 break;
133 case ARMV8_64_EL2T:
134 case ARMV8_64_EL2H:
135 retval = armv8->arm.msr(target, 3, /*op 0*/
136 4, 0, /* op1, op2 */
137 1, 0, /* CRn, CRm */
138 aarch64->system_control_reg_curr);
139 if (retval != ERROR_OK)
140 return retval;
141 break;
142 case ARMV8_64_EL3H:
143 case ARMV8_64_EL3T:
144 retval = armv8->arm.msr(target, 3, /*op 0*/
145 6, 0, /* op1, op2 */
146 1, 0, /* CRn, CRm */
147 aarch64->system_control_reg_curr);
148 if (retval != ERROR_OK)
149 return retval;
150 break;
151 default:
152 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
153 }
154 }
155 } else {
156 if (aarch64->system_control_reg_curr & 0x4U) {
157 /* data cache is active */
158 aarch64->system_control_reg_curr &= ~0x4U;
159 /* the data cache must be flushed before it is disabled */
160 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
161 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
162 }
163 if ((aarch64->system_control_reg_curr & 0x1U)) {
164 aarch64->system_control_reg_curr &= ~0x1U;
165 switch (armv8->arm.core_mode) {
166 case ARMV8_64_EL0T:
167 case ARMV8_64_EL1T:
168 case ARMV8_64_EL1H:
169 retval = armv8->arm.msr(target, 3, /*op 0*/
170 0, 0, /* op1, op2 */
171 1, 0, /* CRn, CRm */
172 aarch64->system_control_reg_curr);
173 if (retval != ERROR_OK)
174 return retval;
175 break;
176 case ARMV8_64_EL2T:
177 case ARMV8_64_EL2H:
178 retval = armv8->arm.msr(target, 3, /*op 0*/
179 4, 0, /* op1, op2 */
180 1, 0, /* CRn, CRm */
181 aarch64->system_control_reg_curr);
182 if (retval != ERROR_OK)
183 return retval;
184 break;
185 case ARMV8_64_EL3H:
186 case ARMV8_64_EL3T:
187 retval = armv8->arm.msr(target, 3, /*op 0*/
188 6, 0, /* op1, op2 */
189 1, 0, /* CRn, CRm */
190 aarch64->system_control_reg_curr);
191 if (retval != ERROR_OK)
192 return retval;
193 break;
194 default:
195 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
196 break;
197 }
198 }
199 }
200 return retval;
201 }
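#if 0
/* Illustrative sketch only (not part of this driver): callers such as
 * the phys read/write and virt2phys paths are expected to bracket a
 * physical-address access with aarch64_mmu_modify() roughly as below.
 * The helper name is hypothetical. */
static int example_read_phys_memory(struct target *target,
	target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval = aarch64_mmu_modify(target, 0);	/* mmu off for the access */
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
	/* re-enable only if the mmu was on when the target stopped */
	if (armv8->armv8_mmu.mmu_enabled)
		aarch64_mmu_modify(target, 1);
	return retval;
}
#endif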
202
203 /*
204 * Basic debug access, very low level assumes state is saved
205 */
206 static int aarch64_init_debug_access(struct target *target)
207 {
208 struct armv8_common *armv8 = target_to_armv8(target);
209 int retval;
210 uint32_t dummy;
211
212 LOG_DEBUG(" ");
213
214 /* Unlock the debug registers for modification.
215 * The debug port might be uninitialised, so try twice. */
216 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
217 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
218 if (retval != ERROR_OK) {
219 /* try again */
220 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
221 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
222 if (retval == ERROR_OK)
223 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
224 }
225 if (retval != ERROR_OK)
226 return retval;
227 /* Clear Sticky Power Down status Bit in PRSR to enable access to
228 the registers in the Core Power Domain */
229 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
230 armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
231 if (retval != ERROR_OK)
232 return retval;
233
234 /* Enabling of instruction execution in debug mode is done in debug_entry code */
235
236 /* Resync breakpoint registers */
237
238 /* Since this is likely called from init or reset, update target state information */
239 return aarch64_poll(target);
240 }
241
242 /* To reduce needless round-trips, pass in a pointer to the current
243 * DSCR value. Initialize it to zero if you just need to know the
244 * value on return from this function; or DSCR_ITE if you
245 * happen to know that no instruction is pending.
246 */
247 static int aarch64_exec_opcode(struct target *target,
248 uint32_t opcode, uint32_t *dscr_p)
249 {
250 uint32_t dscr;
251 int retval;
252 struct armv8_common *armv8 = target_to_armv8(target);
253 dscr = dscr_p ? *dscr_p : 0;
254
255 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
256
257 /* Wait for InstrCompl bit to be set */
258 long long then = timeval_ms();
259 while ((dscr & DSCR_ITE) == 0) {
260 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
261 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
262 if (retval != ERROR_OK) {
263 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
264 return retval;
265 }
266 if (timeval_ms() > then + 1000) {
267 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
268 return ERROR_FAIL;
269 }
270 }
271
272 retval = mem_ap_write_u32(armv8->debug_ap,
273 armv8->debug_base + CPUV8_DBG_ITR, opcode);
274 if (retval != ERROR_OK)
275 return retval;
276
277 then = timeval_ms();
278 do {
279 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
280 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
281 if (retval != ERROR_OK) {
282 LOG_ERROR("Could not read DSCR register");
283 return retval;
284 }
285 if (timeval_ms() > then + 1000) {
286 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
287 return ERROR_FAIL;
288 }
289 } while ((dscr & DSCR_ITE) == 0); /* Wait for InstrCompl bit to be set */
290
291 if (dscr_p)
292 *dscr_p = dscr;
293
294 return retval;
295 }
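#if 0
/* Illustrative sketch only (hypothetical helper): the dscr_p contract
 * described above lets back-to-back opcodes share a single DSCR value.
 * Starting from DSCR_ITE skips the initial status read when the ITR is
 * known to be idle. */
static int example_exec_barriers(struct target *target)
{
	uint32_t dscr = DSCR_ITE;	/* no instruction pending */
	int retval = aarch64_exec_opcode(target, DSB_SY, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* dscr already holds fresh status, saving one MEM-AP read here */
	return aarch64_exec_opcode(target, DSB_SY, &dscr);
}
#endif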
296
297 /* Write to memory mapped registers directly with no cache or mmu handling */
298 static int aarch64_dap_write_memap_register_u32(struct target *target,
299 uint32_t address,
300 uint32_t value)
301 {
302 int retval;
303 struct armv8_common *armv8 = target_to_armv8(target);
304
305 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
306
307 return retval;
308 }
309
310 /*
311 * AARCH64 implementation of Debug Programmer's Model
312 *
313 * NOTE the invariant: these routines return with DSCR_ITE set,
314 * so there's no need to poll for it before executing an instruction.
315 *
316 * NOTE that in several of these cases the "stall" mode might be useful.
317 * It'd let us queue a few operations together... prepare/finish might
318 * be the places to enable/disable that mode.
319 */
320
321 static inline struct aarch64_common *dpm_to_a8(struct arm_dpm *dpm)
322 {
323 return container_of(dpm, struct aarch64_common, armv8_common.dpm);
324 }
325
326 static int aarch64_write_dcc(struct armv8_common *armv8, uint32_t data)
327 {
328 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
329 return mem_ap_write_u32(armv8->debug_ap,
330 armv8->debug_base + CPUV8_DBG_DTRRX, data);
331 }
332
333 static int aarch64_write_dcc_64(struct armv8_common *armv8, uint64_t data)
334 {
335 int ret;
336 LOG_DEBUG("write DCC Low word0x%08" PRIx32, (unsigned)data);
337 LOG_DEBUG("write DCC High word 0x%08" PRIx32, (unsigned)(data >> 32));
338 ret = mem_ap_write_u32(armv8->debug_ap,
339 armv8->debug_base + CPUV8_DBG_DTRRX, data);
340 ret += mem_ap_write_u32(armv8->debug_ap,
341 armv8->debug_base + CPUV8_DBG_DTRTX, data >> 32);
342 return ret;
343 }
344
345 static int aarch64_read_dcc(struct armv8_common *armv8, uint32_t *data,
346 uint32_t *dscr_p)
347 {
348 uint32_t dscr = DSCR_ITE;
349 int retval;
350
351 if (dscr_p)
352 dscr = *dscr_p;
353
354 /* Wait for DTRTXfull */
355 long long then = timeval_ms();
356 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
357 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
358 armv8->debug_base + CPUV8_DBG_DSCR,
359 &dscr);
360 if (retval != ERROR_OK)
361 return retval;
362 if (timeval_ms() > then + 1000) {
363 LOG_ERROR("Timeout waiting for read dcc");
364 return ERROR_FAIL;
365 }
366 }
367
368 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
369 armv8->debug_base + CPUV8_DBG_DTRTX,
370 data);
371 if (retval != ERROR_OK)
372 return retval;
373 LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
374
375 if (dscr_p)
376 *dscr_p = dscr;
377
378 return retval;
379 }
380
381 static int aarch64_read_dcc_64(struct armv8_common *armv8, uint64_t *data,
382 uint32_t *dscr_p)
383 {
384 uint32_t dscr = DSCR_ITE;
385 uint32_t higher;
386 int retval;
387
388 if (dscr_p)
389 dscr = *dscr_p;
390
391 /* Wait for DTRTXfull */
392 long long then = timeval_ms();
393 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
394 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
395 armv8->debug_base + CPUV8_DBG_DSCR,
396 &dscr);
397 if (retval != ERROR_OK)
398 return retval;
399 if (timeval_ms() > then + 1000) {
400 LOG_ERROR("Timeout waiting for read dcc");
401 return ERROR_FAIL;
402 }
403 }
404
405 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
406 armv8->debug_base + CPUV8_DBG_DTRTX,
407 (uint32_t *)data);
408 if (retval != ERROR_OK)
409 return retval;
410
411 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
412 armv8->debug_base + CPUV8_DBG_DTRRX,
413 &higher);
414 if (retval != ERROR_OK)
415 return retval;
416
417 *data = *(uint32_t *)data | (uint64_t)higher << 32;
418 LOG_DEBUG("read DCC 0x%16.16" PRIx64, *data);
419
420 if (dscr_p)
421 *dscr_p = dscr;
422
423 return retval;
424 }
425
426 static int aarch64_dpm_prepare(struct arm_dpm *dpm)
427 {
428 struct aarch64_common *a8 = dpm_to_a8(dpm);
429 uint32_t dscr;
430 int retval;
431
432 /* set up invariant: INSTR_COMP is set after every DPM operation */
433 long long then = timeval_ms();
434 for (;; ) {
435 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
436 a8->armv8_common.debug_base + CPUV8_DBG_DSCR,
437 &dscr);
438 if (retval != ERROR_OK)
439 return retval;
440 if ((dscr & DSCR_ITE) != 0)
441 break;
442 if (timeval_ms() > then + 1000) {
443 LOG_ERROR("Timeout waiting for dpm prepare");
444 return ERROR_FAIL;
445 }
446 }
447
448 /* this "should never happen" ... */
449 if (dscr & DSCR_DTR_RX_FULL) {
450 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
451 /* Clear DTRRX by reading it */
452 retval = mem_ap_read_u32(a8->armv8_common.debug_ap,
453 a8->armv8_common.debug_base + CPUV8_DBG_DTRRX, &dscr);
454 if (retval != ERROR_OK)
455 return retval;
456
457 /* Clear sticky error */
458 retval = mem_ap_write_u32(a8->armv8_common.debug_ap,
459 a8->armv8_common.debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
460 if (retval != ERROR_OK)
461 return retval;
462 }
463
464 return retval;
465 }
466
467 static int aarch64_dpm_finish(struct arm_dpm *dpm)
468 {
469 /* REVISIT what could be done here? */
470 return ERROR_OK;
471 }
472
473 static int aarch64_instr_execute(struct arm_dpm *dpm,
474 uint32_t opcode)
475 {
476 struct aarch64_common *a8 = dpm_to_a8(dpm);
477 uint32_t dscr = DSCR_ITE;
478
479 return aarch64_exec_opcode(
480 a8->armv8_common.arm.target,
481 opcode,
482 &dscr);
483 }
484
485 static int aarch64_instr_write_data_dcc(struct arm_dpm *dpm,
486 uint32_t opcode, uint32_t data)
487 {
488 struct aarch64_common *a8 = dpm_to_a8(dpm);
489 int retval;
490 uint32_t dscr = DSCR_ITE;
491
492 retval = aarch64_write_dcc(&a8->armv8_common, data);
493 if (retval != ERROR_OK)
494 return retval;
495
496 return aarch64_exec_opcode(
497 a8->armv8_common.arm.target,
498 opcode,
499 &dscr);
500 }
501
502 static int aarch64_instr_write_data_dcc_64(struct arm_dpm *dpm,
503 uint32_t opcode, uint64_t data)
504 {
505 struct aarch64_common *a8 = dpm_to_a8(dpm);
506 int retval;
507 uint32_t dscr = DSCR_ITE;
508
509 retval = aarch64_write_dcc_64(&a8->armv8_common, data);
510 if (retval != ERROR_OK)
511 return retval;
512
513 return aarch64_exec_opcode(
514 a8->armv8_common.arm.target,
515 opcode,
516 &dscr);
517 }
518
519 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
520 uint32_t opcode, uint32_t data)
521 {
522 struct aarch64_common *a8 = dpm_to_a8(dpm);
523 uint32_t dscr = DSCR_ITE;
524 int retval;
525
526 retval = aarch64_write_dcc(&a8->armv8_common, data);
527 if (retval != ERROR_OK)
528 return retval;
529
530 retval = aarch64_exec_opcode(
531 a8->armv8_common.arm.target,
532 ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 0),
533 &dscr);
534 if (retval != ERROR_OK)
535 return retval;
536
537 /* then the opcode, taking data from R0 */
538 retval = aarch64_exec_opcode(
539 a8->armv8_common.arm.target,
540 opcode,
541 &dscr);
542
543 return retval;
544 }
545
546 static int aarch64_instr_write_data_r0_64(struct arm_dpm *dpm,
547 uint32_t opcode, uint64_t data)
548 {
549 struct aarch64_common *a8 = dpm_to_a8(dpm);
550 uint32_t dscr = DSCR_ITE;
551 int retval;
552
553 retval = aarch64_write_dcc_64(&a8->armv8_common, data);
554 if (retval != ERROR_OK)
555 return retval;
556
557 retval = aarch64_exec_opcode(
558 a8->armv8_common.arm.target,
559 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0),
560 &dscr);
561 if (retval != ERROR_OK)
562 return retval;
563
564 /* then the opcode, taking data from R0 */
565 retval = aarch64_exec_opcode(
566 a8->armv8_common.arm.target,
567 opcode,
568 &dscr);
569
570 return retval;
571 }
572
573 static int aarch64_instr_cpsr_sync(struct arm_dpm *dpm)
574 {
575 struct target *target = dpm->arm->target;
576 uint32_t dscr = DSCR_ITE;
577
578 /* "Prefetch flush" after modifying execution status in CPSR */
579 return aarch64_exec_opcode(target,
580 DSB_SY,
581 &dscr);
582 }
583
584 static int aarch64_instr_read_data_dcc(struct arm_dpm *dpm,
585 uint32_t opcode, uint32_t *data)
586 {
587 struct aarch64_common *a8 = dpm_to_a8(dpm);
588 int retval;
589 uint32_t dscr = DSCR_ITE;
590
591 /* the opcode, writing data to DCC */
592 retval = aarch64_exec_opcode(
593 a8->armv8_common.arm.target,
594 opcode,
595 &dscr);
596 if (retval != ERROR_OK)
597 return retval;
598
599 return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
600 }
601
602 static int aarch64_instr_read_data_dcc_64(struct arm_dpm *dpm,
603 uint32_t opcode, uint64_t *data)
604 {
605 struct aarch64_common *a8 = dpm_to_a8(dpm);
606 int retval;
607 uint32_t dscr = DSCR_ITE;
608
609 /* the opcode, writing data to DCC */
610 retval = aarch64_exec_opcode(
611 a8->armv8_common.arm.target,
612 opcode,
613 &dscr);
614 if (retval != ERROR_OK)
615 return retval;
616
617 return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
618 }
619
620 static int aarch64_instr_read_data_r0(struct arm_dpm *dpm,
621 uint32_t opcode, uint32_t *data)
622 {
623 struct aarch64_common *a8 = dpm_to_a8(dpm);
624 uint32_t dscr = DSCR_ITE;
625 int retval;
626
627 /* the opcode, writing data to R0 */
628 retval = aarch64_exec_opcode(
629 a8->armv8_common.arm.target,
630 opcode,
631 &dscr);
632 if (retval != ERROR_OK)
633 return retval;
634
635 /* write R0 to DCC */
636 retval = aarch64_exec_opcode(
637 a8->armv8_common.arm.target,
638 ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 0), /* msr dbgdtr_el0, x0 */
639 &dscr);
640 if (retval != ERROR_OK)
641 return retval;
642
643 return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
644 }
645
646 static int aarch64_instr_read_data_r0_64(struct arm_dpm *dpm,
647 uint32_t opcode, uint64_t *data)
648 {
649 struct aarch64_common *a8 = dpm_to_a8(dpm);
650 uint32_t dscr = DSCR_ITE;
651 int retval;
652
653 /* the opcode, writing data to R0 */
654 retval = aarch64_exec_opcode(
655 a8->armv8_common.arm.target,
656 opcode,
657 &dscr);
658 if (retval != ERROR_OK)
659 return retval;
660
661 /* write R0 to DCC */
662 retval = aarch64_exec_opcode(
663 a8->armv8_common.arm.target,
664 ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), /* msr dbgdtr_el0, x0 */
665 &dscr);
666 if (retval != ERROR_OK)
667 return retval;
668
669 return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
670 }
671
672 static int aarch64_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
673 uint32_t addr, uint32_t control)
674 {
675 struct aarch64_common *a8 = dpm_to_a8(dpm);
676 uint32_t vr = a8->armv8_common.debug_base;
677 uint32_t cr = a8->armv8_common.debug_base;
678 int retval;
679
680 switch (index_t) {
681 case 0 ... 15: /* breakpoints */
682 vr += CPUV8_DBG_BVR_BASE;
683 cr += CPUV8_DBG_BCR_BASE;
684 break;
685 case 16 ... 31: /* watchpoints */
686 vr += CPUV8_DBG_WVR_BASE;
687 cr += CPUV8_DBG_WCR_BASE;
688 index_t -= 16;
689 break;
690 default:
691 return ERROR_FAIL;
692 }
693 vr += 16 * index_t;	/* v8 breakpoint/watchpoint registers */
694 cr += 16 * index_t;	/* are spaced 16 bytes apart */
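/* e.g. index_t == 17 selects watchpoint 1: vr and cr now point at
 * DBGWVR1/DBGWCR1, one 16-byte slot past the watchpoint bases */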
695
696 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
697 (unsigned) vr, (unsigned) cr);
698
699 retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
700 vr, addr);
701 if (retval != ERROR_OK)
702 return retval;
703 retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
704 cr, control);
705 return retval;
706 }
707
708 static int aarch64_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
709 {
710 return ERROR_OK;
711
712 #if 0
713 struct aarch64_common *a = dpm_to_a8(dpm);
714 uint32_t cr;
715
716 switch (index_t) {
717 case 0 ... 15:
718 cr = a->armv8_common.debug_base + CPUV8_DBG_BCR_BASE;
719 break;
720 case 16 ... 31:
721 cr = a->armv8_common.debug_base + CPUV8_DBG_WCR_BASE;
722 index_t -= 16;
723 break;
724 default:
725 return ERROR_FAIL;
726 }
727 cr += 4 * index_t;
728
729 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
730
731 /* clear control register */
732 return aarch64_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
733 #endif
734 }
735
736 static int aarch64_dpm_setup(struct aarch64_common *a8, uint32_t debug)
737 {
738 struct arm_dpm *dpm = &a8->armv8_common.dpm;
739 int retval;
740
741 dpm->arm = &a8->armv8_common.arm;
742 dpm->didr = debug;
743
744 dpm->prepare = aarch64_dpm_prepare;
745 dpm->finish = aarch64_dpm_finish;
746
747 dpm->instr_execute = aarch64_instr_execute;
748 dpm->instr_write_data_dcc = aarch64_instr_write_data_dcc;
749 dpm->instr_write_data_dcc_64 = aarch64_instr_write_data_dcc_64;
750 dpm->instr_write_data_r0 = aarch64_instr_write_data_r0;
751 dpm->instr_write_data_r0_64 = aarch64_instr_write_data_r0_64;
752 dpm->instr_cpsr_sync = aarch64_instr_cpsr_sync;
753
754 dpm->instr_read_data_dcc = aarch64_instr_read_data_dcc;
755 dpm->instr_read_data_dcc_64 = aarch64_instr_read_data_dcc_64;
756 dpm->instr_read_data_r0 = aarch64_instr_read_data_r0;
757 dpm->instr_read_data_r0_64 = aarch64_instr_read_data_r0_64;
758
759 dpm->arm_reg_current = armv8_reg_current;
760
761 dpm->bpwp_enable = aarch64_bpwp_enable;
762 dpm->bpwp_disable = aarch64_bpwp_disable;
763
764 retval = armv8_dpm_setup(dpm);
765 if (retval == ERROR_OK)
766 retval = armv8_dpm_initialize(dpm);
767
768 return retval;
769 }
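#if 0
/* Illustrative sketch only: reading a system register through the DPM
 * ops installed above. The raw A64 encoding keeps the example
 * self-contained; 0xd5381000 is "mrs x0, sctlr_el1". */
static int example_read_sctlr_el1(struct arm_dpm *dpm, uint32_t *value)
{
	int retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;
	/* execute the MRS, then move x0 to the DCC and read it back */
	retval = dpm->instr_read_data_r0(dpm, 0xd5381000, value);
	dpm->finish(dpm);
	return retval;
}
#endif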
770 static struct target *get_aarch64(struct target *target, int32_t coreid)
771 {
772 struct target_list *head;
773 struct target *curr;
774
775 head = target->head;
776 while (head != (struct target_list *)NULL) {
777 curr = head->target;
778 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
779 return curr;
780 head = head->next;
781 }
782 return target;
783 }
784 static int aarch64_halt(struct target *target);
785
786 static int aarch64_halt_smp(struct target *target)
787 {
788 int retval = 0;
789 struct target_list *head;
790 struct target *curr;
791 head = target->head;
792 while (head != (struct target_list *)NULL) {
793 curr = head->target;
794 if ((curr != target) && (curr->state != TARGET_HALTED))
795 retval += aarch64_halt(curr);
796 head = head->next;
797 }
798 return retval;
799 }
800
801 static int update_halt_gdb(struct target *target)
802 {
803 int retval = 0;
804 if (target->gdb_service && target->gdb_service->core[0] == -1) {
805 target->gdb_service->target = target;
806 target->gdb_service->core[0] = target->coreid;
807 retval += aarch64_halt_smp(target);
808 }
809 return retval;
810 }
811
812 /*
813 * AArch64 run control
814 */
815
816 static int aarch64_poll(struct target *target)
817 {
818 int retval = ERROR_OK;
819 uint32_t dscr;
820 struct aarch64_common *aarch64 = target_to_aarch64(target);
821 struct armv8_common *armv8 = &aarch64->armv8_common;
822 enum target_state prev_target_state = target->state;
823 /* gdb toggles to another core as follows: */
824 /* maint packet J core_id */
825 /* continue */
826 /* the next poll triggers a halt event sent to gdb */
827 if ((target->state == TARGET_HALTED) && (target->smp) &&
828 (target->gdb_service) &&
829 (target->gdb_service->target == NULL)) {
830 target->gdb_service->target =
831 get_aarch64(target, target->gdb_service->core[1]);
832 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
833 return retval;
834 }
835 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
836 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
837 if (retval != ERROR_OK)
838 return retval;
839 aarch64->cpudbg_dscr = dscr;
840
841 if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
842 if (prev_target_state != TARGET_HALTED) {
843 /* We have a halting debug event */
844 LOG_DEBUG("Target halted");
845 target->state = TARGET_HALTED;
846 if ((prev_target_state == TARGET_RUNNING)
847 || (prev_target_state == TARGET_UNKNOWN)
848 || (prev_target_state == TARGET_RESET)) {
849 retval = aarch64_debug_entry(target);
850 if (retval != ERROR_OK)
851 return retval;
852 if (target->smp) {
853 retval = update_halt_gdb(target);
854 if (retval != ERROR_OK)
855 return retval;
856 }
857 target_call_event_callbacks(target,
858 TARGET_EVENT_HALTED);
859 }
860 if (prev_target_state == TARGET_DEBUG_RUNNING) {
861 LOG_DEBUG(" ");
862
863 retval = aarch64_debug_entry(target);
864 if (retval != ERROR_OK)
865 return retval;
866 if (target->smp) {
867 retval = update_halt_gdb(target);
868 if (retval != ERROR_OK)
869 return retval;
870 }
871
872 target_call_event_callbacks(target,
873 TARGET_EVENT_DEBUG_HALTED);
874 }
875 }
876 } else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
877 target->state = TARGET_RUNNING;
878 else {
879 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
880 target->state = TARGET_UNKNOWN;
881 }
882
883 return retval;
884 }
885
886 static int aarch64_halt(struct target *target)
887 {
888 int retval = ERROR_OK;
889 uint32_t dscr;
890 struct armv8_common *armv8 = target_to_armv8(target);
891
892 /* enable the CTI */
893 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
894 armv8->cti_base + CTI_CTR, 1);
895 if (retval != ERROR_OK)
896 return retval;
897
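/* let channels 0 and 1 pass through the CTI gate */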
898 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
899 armv8->cti_base + CTI_GATE, 3);
900 if (retval != ERROR_OK)
901 return retval;
902
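/* route channel 0 to the core's debug (halt) request trigger,
 * assuming the standard v8 CTI wiring */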
903 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
904 armv8->cti_base + CTI_OUTEN0, 1);
905 if (retval != ERROR_OK)
906 return retval;
907
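/* route channel 1 to the core's restart trigger */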
908 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
909 armv8->cti_base + CTI_OUTEN1, 2);
910 if (retval != ERROR_OK)
911 return retval;
912
913 /*
914 * set DSCR.HDE to enable halting debug mode
915 */
916 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
917 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
918 if (retval != ERROR_OK)
919 return retval;
920
921 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
922 armv8->debug_base + CPUV8_DBG_DSCR, dscr | DSCR_HDE);
923 if (retval != ERROR_OK)
924 return retval;
925
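/* pulse channel 0: generate the halt request */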
926 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
927 armv8->cti_base + CTI_APPPULSE, 1);
928 if (retval != ERROR_OK)
929 return retval;
930
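/* acknowledge and clear the halt trigger */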
931 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
932 armv8->cti_base + CTI_INACK, 1);
933 if (retval != ERROR_OK)
934 return retval;
935
936
937 long long then = timeval_ms();
938 for (;; ) {
939 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
940 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
941 if (retval != ERROR_OK)
942 return retval;
943 if ((dscr & DSCRV8_HALT_MASK) != 0)
944 break;
945 if (timeval_ms() > then + 1000) {
946 LOG_ERROR("Timeout waiting for halt");
947 return ERROR_FAIL;
948 }
949 }
950
951 target->debug_reason = DBG_REASON_DBGRQ;
952
953 return ERROR_OK;
954 }
955
956 static int aarch64_internal_restore(struct target *target, int current,
957 uint64_t *address, int handle_breakpoints, int debug_execution)
958 {
959 struct armv8_common *armv8 = target_to_armv8(target);
960 struct arm *arm = &armv8->arm;
961 int retval;
962 uint64_t resume_pc;
963
964 if (!debug_execution)
965 target_free_all_working_areas(target);
966
967 /* current = 1: continue on current pc, otherwise continue at <address> */
968 resume_pc = buf_get_u64(arm->pc->value, 0, 64);
969 if (!current)
970 resume_pc = *address;
971 else
972 *address = resume_pc;
973
974 /* Make sure that the gdb thumb fixup does not
975 * kill the return address
976 */
977 switch (arm->core_state) {
978 case ARM_STATE_ARM:
979 resume_pc &= 0xFFFFFFFC;
980 break;
981 case ARM_STATE_AARCH64:
982 resume_pc &= 0xFFFFFFFFFFFFFFFC;
983 break;
984 case ARM_STATE_THUMB:
985 case ARM_STATE_THUMB_EE:
986 /* When the return address is loaded into PC
987 * bit 0 must be 1 to stay in Thumb state
988 */
989 resume_pc |= 0x1;
990 break;
991 case ARM_STATE_JAZELLE:
992 LOG_ERROR("How do I resume into Jazelle state??");
993 return ERROR_FAIL;
994 }
995 LOG_DEBUG("resume pc = 0x%16" PRIx64, resume_pc);
996 buf_set_u64(arm->pc->value, 0, 64, resume_pc);
997 arm->pc->dirty = 1;
998 arm->pc->valid = 1;
999 dpmv8_modeswitch(&armv8->dpm, ARM_MODE_ANY);
1000
1001 /* call this now, before restoring context, because it uses cpu
1002 * register r0 to restore the system control register */
1003 retval = aarch64_restore_system_control_reg(target);
1004 if (retval != ERROR_OK)
1005 return retval;
1006 retval = aarch64_restore_context(target, handle_breakpoints);
1007 if (retval != ERROR_OK)
1008 return retval;
1009 target->debug_reason = DBG_REASON_NOTHALTED;
1010 target->state = TARGET_RUNNING;
1011
1012 /* registers are now invalid */
1013 register_cache_invalidate(arm->core_cache);
1014
1015 #if 0
1016 /* the front-end may request us not to handle breakpoints */
1017 if (handle_breakpoints) {
1018 /* Single step past breakpoint at current address */
1019 breakpoint = breakpoint_find(target, resume_pc);
1020 if (breakpoint) {
1021 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
1022 cortex_m3_unset_breakpoint(target, breakpoint);
1023 cortex_m3_single_step_core(target);
1024 cortex_m3_set_breakpoint(target, breakpoint);
1025 }
1026 }
1027 #endif
1028
1029 return retval;
1030 }
1031
1032 static int aarch64_internal_restart(struct target *target)
1033 {
1034 struct armv8_common *armv8 = target_to_armv8(target);
1035 struct arm *arm = &armv8->arm;
1036 int retval;
1037 uint32_t dscr;
1038 /*
1039 * Restart the core and wait for it to be started. Clear ITRen and sticky
1040 * exception flags: see ARMv7 ARM, C5.9.
1041 *
1042 * REVISIT: for single stepping, we probably want to
1043 * disable IRQs by default, with optional override...
1044 */
1045
1046 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1047 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1048 if (retval != ERROR_OK)
1049 return retval;
1050
1051 if ((dscr & DSCR_ITE) == 0)
1052 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
1053
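/* pulse channel 1: generate the restart request */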
1054 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1055 armv8->cti_base + CTI_APPPULSE, 2);
1056 if (retval != ERROR_OK)
1057 return retval;
1058
1059 long long then = timeval_ms();
1060 for (;; ) {
1061 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1062 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1063 if (retval != ERROR_OK)
1064 return retval;
1065 if ((dscr & DSCR_HDE) != 0)
1066 break;
1067 if (timeval_ms() > then + 1000) {
1068 LOG_ERROR("Timeout waiting for resume");
1069 return ERROR_FAIL;
1070 }
1071 }
1072
1073 target->debug_reason = DBG_REASON_NOTHALTED;
1074 target->state = TARGET_RUNNING;
1075
1076 /* registers are now invalid */
1077 register_cache_invalidate(arm->core_cache);
1078
1079 return ERROR_OK;
1080 }
1081
1082 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
1083 {
1084 int retval = 0;
1085 struct target_list *head;
1086 struct target *curr;
1087 uint64_t address;
1088 head = target->head;
1089 while (head != (struct target_list *)NULL) {
1090 curr = head->target;
1091 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1092 /* resume at current address, not in step mode */
1093 retval += aarch64_internal_restore(curr, 1, &address,
1094 handle_breakpoints, 0);
1095 retval += aarch64_internal_restart(curr);
1096 }
1097 head = head->next;
1098
1099 }
1100 return retval;
1101 }
1102
1103 static int aarch64_resume(struct target *target, int current,
1104 target_addr_t address, int handle_breakpoints, int debug_execution)
1105 {
1106 int retval = 0;
1107 uint64_t addr = address;
1108
1109 /* dummy resume for smp toggle in order to reduce gdb impact */
1110 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1111 /* simulate a start and halt of target */
1112 target->gdb_service->target = NULL;
1113 target->gdb_service->core[0] = target->gdb_service->core[1];
1114 /* fake resume: at the next poll we act on target core[1], see poll */
1115 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1116 return 0;
1117 }
1118 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
1119 debug_execution);
1120 if (target->smp) {
1121 target->gdb_service->core[0] = -1;
1122 retval = aarch64_restore_smp(target, handle_breakpoints);
1123 if (retval != ERROR_OK)
1124 return retval;
1125 }
1126 aarch64_internal_restart(target);
1127
1128 if (!debug_execution) {
1129 target->state = TARGET_RUNNING;
1130 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1131 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
1132 } else {
1133 target->state = TARGET_DEBUG_RUNNING;
1134 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1135 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
1136 }
1137
1138 return ERROR_OK;
1139 }
1140
1141 static int aarch64_debug_entry(struct target *target)
1142 {
1143 int retval = ERROR_OK;
1144 struct aarch64_common *aarch64 = target_to_aarch64(target);
1145 struct armv8_common *armv8 = target_to_armv8(target);
1146
1147 LOG_DEBUG("dscr = 0x%08" PRIx32, aarch64->cpudbg_dscr);
1148
1149 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1150 * imprecise data aborts get discarded by issuing a Data
1151 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1152 */
1153
1154 /* make sure to clear all sticky errors */
1155 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1156 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1157 if (retval != ERROR_OK)
1158 return retval;
1159
1160 /* Examine debug reason */
1161 armv8_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);
1162
1163 /* save address of instruction that triggered the watchpoint? */
1164 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1165 uint32_t tmp;
1166 uint64_t wfar = 0;
1167
1168 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1169 armv8->debug_base + CPUV8_DBG_WFAR1,
1170 &tmp);
1171 if (retval != ERROR_OK)
1172 return retval;
1173 wfar = tmp;
1174 wfar = (wfar << 32);
1175 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1176 armv8->debug_base + CPUV8_DBG_WFAR0,
1177 &tmp);
1178 if (retval != ERROR_OK)
1179 return retval;
1180 wfar |= tmp;
1181 armv8_dpm_report_wfar(&armv8->dpm, wfar);
1182 }
1183
1184 retval = armv8_dpm_read_current_registers(&armv8->dpm);
1185
1186 if (armv8->post_debug_entry) {
1187 retval = armv8->post_debug_entry(target);
1188 if (retval != ERROR_OK)
1189 return retval;
1190 }
1191
1192 return retval;
1193 }
1194
1195 static int aarch64_post_debug_entry(struct target *target)
1196 {
1197 struct aarch64_common *aarch64 = target_to_aarch64(target);
1198 struct armv8_common *armv8 = &aarch64->armv8_common;
1199 int retval;
1200
1201 mem_ap_write_atomic_u32(armv8->debug_ap,
1202 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);	/* clear sticky errors */
1203 switch (armv8->arm.core_mode) {
1204 case ARMV8_64_EL0T:
1205 case ARMV8_64_EL1T:
1206 case ARMV8_64_EL1H:
1207 retval = armv8->arm.mrs(target, 3, /*op 0*/
1208 0, 0, /* op1, op2 */
1209 1, 0, /* CRn, CRm */
1210 &aarch64->system_control_reg);
1211 if (retval != ERROR_OK)
1212 return retval;
1213 break;
1214 case ARMV8_64_EL2T:
1215 case ARMV8_64_EL2H:
1216 retval = armv8->arm.mrs(target, 3, /*op 0*/
1217 4, 0, /* op1, op2 */
1218 1, 0, /* CRn, CRm */
1219 &aarch64->system_control_reg);
1220 if (retval != ERROR_OK)
1221 return retval;
1222 break;
1223 case ARMV8_64_EL3H:
1224 case ARMV8_64_EL3T:
1225 retval = armv8->arm.mrs(target, 3, /*op 0*/
1226 6, 0, /* op1, op2 */
1227 1, 0, /* CRn, CRm */
1228 &aarch64->system_control_reg);
1229 if (retval != ERROR_OK)
1230 return retval;
1231 break;
1232 default:
1233 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
1234 }
1235 LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
1236 aarch64->system_control_reg_curr = aarch64->system_control_reg;
1237
1238 #if 0
1239 if (armv8->armv8_mmu.armv8_cache.ctype == -1)
1240 armv8_identify_cache(target);
1241 #endif
1242
1243 armv8->armv8_mmu.mmu_enabled =
1244 (aarch64->system_control_reg & 0x1U) ? 1 : 0;
1245 armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
1246 (aarch64->system_control_reg & 0x4U) ? 1 : 0;
1247 armv8->armv8_mmu.armv8_cache.i_cache_enabled =
1248 (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
1249 aarch64->curr_mode = armv8->arm.core_mode;
1250 return ERROR_OK;
1251 }
1252
1253 static int aarch64_step(struct target *target, int current, target_addr_t address,
1254 int handle_breakpoints)
1255 {
1256 struct armv8_common *armv8 = target_to_armv8(target);
1257 int retval;
1258 uint32_t tmp, edesr;
1259
1260 if (target->state != TARGET_HALTED) {
1261 LOG_WARNING("target not halted");
1262 return ERROR_TARGET_NOT_HALTED;
1263 }
1264
1265 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1266 armv8->debug_base + CPUV8_DBG_EDECR, &tmp);
1267 if (retval != ERROR_OK)
1268 return retval;
1269
1270 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1271 armv8->debug_base + CPUV8_DBG_EDECR, (tmp|0x4));
1272 if (retval != ERROR_OK)
1273 return retval;
1274
1275 target->debug_reason = DBG_REASON_SINGLESTEP;
1276 retval = aarch64_resume(target, 1, address, 0, 0);
1277 if (retval != ERROR_OK)
1278 return retval;
1279
1280 long long then = timeval_ms();
1281 while (target->state != TARGET_HALTED) {
1282 mem_ap_read_atomic_u32(armv8->debug_ap,
1283 armv8->debug_base + CPUV8_DBG_EDESR, &edesr);
1284 LOG_DEBUG("EDESR = %#x", edesr);
1285 retval = aarch64_poll(target);
1286 if (retval != ERROR_OK)
1287 return retval;
1288 if (timeval_ms() > then + 1000) {
1289 LOG_ERROR("timeout waiting for target halt");
1290 return ERROR_FAIL;
1291 }
1292 }
1293
1294 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1295 armv8->debug_base + CPUV8_DBG_EDECR, (tmp&(~0x4)));
1296 if (retval != ERROR_OK)
1297 return retval;
1298
1299 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1300 if (target->state == TARGET_HALTED)
1301 LOG_DEBUG("target stepped");
1302
1303 return ERROR_OK;
1304 }
1305
1306 static int aarch64_restore_context(struct target *target, bool bpwp)
1307 {
1308 struct armv8_common *armv8 = target_to_armv8(target);
1309
1310 LOG_DEBUG(" ");
1311
1312 if (armv8->pre_restore_context)
1313 armv8->pre_restore_context(target);
1314
1315 return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1316
1317 }
1318
1319 /*
1320 * AArch64 breakpoint and watchpoint functions
1321 */
1322
1323 /* Setup hardware Breakpoint Register Pair */
1324 static int aarch64_set_breakpoint(struct target *target,
1325 struct breakpoint *breakpoint, uint8_t matchmode)
1326 {
1327 int retval;
1328 int brp_i = 0;
1329 uint32_t control;
1330 uint8_t byte_addr_select = 0x0F;
1331 struct aarch64_common *aarch64 = target_to_aarch64(target);
1332 struct armv8_common *armv8 = &aarch64->armv8_common;
1333 struct aarch64_brp *brp_list = aarch64->brp_list;
1334 uint32_t dscr;
1335
1336 if (breakpoint->set) {
1337 LOG_WARNING("breakpoint already set");
1338 return ERROR_OK;
1339 }
1340
1341 if (breakpoint->type == BKPT_HARD) {
1342 int64_t bpt_value;
1343 while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
1344 brp_i++;
1345 if (brp_i >= aarch64->brp_num) {
1346 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1347 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1348 }
1349 breakpoint->set = brp_i + 1;
1350 if (breakpoint->length == 2)
1351 byte_addr_select = (3 << (breakpoint->address & 0x02));
1352 control = ((matchmode & 0x7) << 20)
1353 | (1 << 13)
1354 | (byte_addr_select << 5)
1355 | (3 << 1) | 1;
1356 brp_list[brp_i].used = 1;
1357 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1358 brp_list[brp_i].control = control;
1359 bpt_value = brp_list[brp_i].value;
1360
1361 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1362 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1363 (uint32_t)(bpt_value & 0xFFFFFFFF));
1364 if (retval != ERROR_OK)
1365 return retval;
1366 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1367 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1368 (uint32_t)(bpt_value >> 32));
1369 if (retval != ERROR_OK)
1370 return retval;
1371
1372 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1373 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1374 brp_list[brp_i].control);
1375 if (retval != ERROR_OK)
1376 return retval;
1377 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1378 brp_list[brp_i].control,
1379 brp_list[brp_i].value);
1380
1381 } else if (breakpoint->type == BKPT_SOFT) {
1382 uint8_t code[4];
1383 buf_set_u32(code, 0, 32, ARMV8_BKPT(0x11));
1384 retval = target_read_memory(target,
1385 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1386 breakpoint->length, 1,
1387 breakpoint->orig_instr);
1388 if (retval != ERROR_OK)
1389 return retval;
1390 retval = target_write_memory(target,
1391 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1392 breakpoint->length, 1, code);
1393 if (retval != ERROR_OK)
1394 return retval;
1395 breakpoint->set = 0x11; /* Any nice value but 0 */
1396 }
1397
1398 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1399 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1400 /* Ensure that halting debug mode is enabled */
1401 dscr = dscr | DSCR_HDE;
1402 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1403 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1404 if (retval != ERROR_OK) {
1405 LOG_DEBUG("Failed to set DSCR.HDE");
1406 return retval;
1407 }
1408
1409 return ERROR_OK;
1410 }
1411
1412 static int aarch64_set_context_breakpoint(struct target *target,
1413 struct breakpoint *breakpoint, uint8_t matchmode)
1414 {
1415 int retval = ERROR_FAIL;
1416 int brp_i = 0;
1417 uint32_t control;
1418 uint8_t byte_addr_select = 0x0F;
1419 struct aarch64_common *aarch64 = target_to_aarch64(target);
1420 struct armv8_common *armv8 = &aarch64->armv8_common;
1421 struct aarch64_brp *brp_list = aarch64->brp_list;
1422
1423 if (breakpoint->set) {
1424 LOG_WARNING("breakpoint already set");
1425 return retval;
1426 }
1427 /*check available context BRPs*/
1428 while ((brp_i < aarch64->brp_num) && (brp_list[brp_i].used ||
1429 (brp_list[brp_i].type != BRP_CONTEXT)))
1430 brp_i++;
1431
1432 if (brp_i >= aarch64->brp_num) {
1433 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1434 return ERROR_FAIL;
1435 }
1436
1437 breakpoint->set = brp_i + 1;
1438 control = ((matchmode & 0x7) << 20)
1439 | (1 << 13)
1440 | (byte_addr_select << 5)
1441 | (3 << 1) | 1;
1442 brp_list[brp_i].used = 1;
1443 brp_list[brp_i].value = (breakpoint->asid);
1444 brp_list[brp_i].control = control;
1445 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1446 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1447 brp_list[brp_i].value);
1448 if (retval != ERROR_OK)
1449 return retval;
1450 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1451 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1452 brp_list[brp_i].control);
1453 if (retval != ERROR_OK)
1454 return retval;
1455 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1456 brp_list[brp_i].control,
1457 brp_list[brp_i].value);
1458 return ERROR_OK;
1459
1460 }
1461
1462 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1463 {
1464 int retval = ERROR_FAIL;
1465 int brp_1 = 0; /* holds the contextID pair */
1466 int brp_2 = 0; /* holds the IVA pair */
1467 uint32_t control_CTX, control_IVA;
1468 uint8_t CTX_byte_addr_select = 0x0F;
1469 uint8_t IVA_byte_addr_select = 0x0F;
1470 uint8_t CTX_matchmode = 0x03;
1471 uint8_t IVA_matchmode = 0x01;
1472 struct aarch64_common *aarch64 = target_to_aarch64(target);
1473 struct armv8_common *armv8 = &aarch64->armv8_common;
1474 struct aarch64_brp *brp_list = aarch64->brp_list;
1475
1476 if (breakpoint->set) {
1477 LOG_WARNING("breakpoint already set");
1478 return retval;
1479 }
1480 /*check available context BRPs*/
1481 while ((brp_1 < aarch64->brp_num) && (brp_list[brp_1].used ||
1482 (brp_list[brp_1].type != BRP_CONTEXT)))
1483 brp_1++;
1484
1485 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1486 if (brp_1 >= aarch64->brp_num) {
1487 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1488 return ERROR_FAIL;
1489 }
1490
1491 while ((brp_2 < aarch64->brp_num) && (brp_list[brp_2].used ||
1492 (brp_list[brp_2].type != BRP_NORMAL)))
1493 brp_2++;
1494
1495 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1496 if (brp_2 >= aarch64->brp_num) {
1497 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1498 return ERROR_FAIL;
1499 }
1500
1501 breakpoint->set = brp_1 + 1;
1502 breakpoint->linked_BRP = brp_2;
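/* the two BRPs are linked: each control word carries the partner's
 * BRP number in its LBN field (bits 19:16), written below as
 * (brp_2 << 16) and (brp_1 << 16) */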
1503 control_CTX = ((CTX_matchmode & 0x7) << 20)
1504 | (brp_2 << 16)
1505 | (0 << 14)
1506 | (CTX_byte_addr_select << 5)
1507 | (3 << 1) | 1;
1508 brp_list[brp_1].used = 1;
1509 brp_list[brp_1].value = (breakpoint->asid);
1510 brp_list[brp_1].control = control_CTX;
1511 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1512 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1513 brp_list[brp_1].value);
1514 if (retval != ERROR_OK)
1515 return retval;
1516 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1517 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1518 brp_list[brp_1].control);
1519 if (retval != ERROR_OK)
1520 return retval;
1521
1522 control_IVA = ((IVA_matchmode & 0x7) << 20)
1523 | (brp_1 << 16)
1524 | (1 << 13)
1525 | (IVA_byte_addr_select << 5)
1526 | (3 << 1) | 1;
1527 brp_list[brp_2].used = 1;
1528 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1529 brp_list[brp_2].control = control_IVA;
1530 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1531 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1532 brp_list[brp_2].value & 0xFFFFFFFF);
1533 if (retval != ERROR_OK)
1534 return retval;
1535 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1536 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1537 brp_list[brp_2].value >> 32);
1538 if (retval != ERROR_OK)
1539 return retval;
1540 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1541 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1542 brp_list[brp_2].control);
1543 if (retval != ERROR_OK)
1544 return retval;
1545
1546 return ERROR_OK;
1547 }
1548
1549 static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1550 {
1551 int retval;
1552 struct aarch64_common *aarch64 = target_to_aarch64(target);
1553 struct armv8_common *armv8 = &aarch64->armv8_common;
1554 struct aarch64_brp *brp_list = aarch64->brp_list;
1555
1556 if (!breakpoint->set) {
1557 LOG_WARNING("breakpoint not set");
1558 return ERROR_OK;
1559 }
1560
1561 if (breakpoint->type == BKPT_HARD) {
1562 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1563 int brp_i = breakpoint->set - 1;
1564 int brp_j = breakpoint->linked_BRP;
1565 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1566 LOG_DEBUG("Invalid BRP number in breakpoint");
1567 return ERROR_OK;
1568 }
1569 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1570 brp_list[brp_i].control, brp_list[brp_i].value);
1571 brp_list[brp_i].used = 0;
1572 brp_list[brp_i].value = 0;
1573 brp_list[brp_i].control = 0;
1574 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1575 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1576 brp_list[brp_i].control);
1577 if (retval != ERROR_OK)
1578 return retval;
1579 if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
1580 LOG_DEBUG("Invalid BRP number in breakpoint");
1581 return ERROR_OK;
1582 }
1583 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
1584 brp_list[brp_j].control, brp_list[brp_j].value);
1585 brp_list[brp_j].used = 0;
1586 brp_list[brp_j].value = 0;
1587 brp_list[brp_j].control = 0;
1588 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1589 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
1590 brp_list[brp_j].control);
1591 if (retval != ERROR_OK)
1592 return retval;
1593 breakpoint->linked_BRP = 0;
1594 breakpoint->set = 0;
1595 return ERROR_OK;
1596
1597 } else {
1598 int brp_i = breakpoint->set - 1;
1599 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1600 LOG_DEBUG("Invalid BRP number in breakpoint");
1601 return ERROR_OK;
1602 }
1603 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
1604 brp_list[brp_i].control, brp_list[brp_i].value);
1605 brp_list[brp_i].used = 0;
1606 brp_list[brp_i].value = 0;
1607 brp_list[brp_i].control = 0;
1608 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1609 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1610 brp_list[brp_i].control);
1611 if (retval != ERROR_OK)
1612 return retval;
1613 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1614 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1615 brp_list[brp_i].value);
1616 if (retval != ERROR_OK)
1617 return retval;
1618 breakpoint->set = 0;
1619 return ERROR_OK;
1620 }
1621 } else {
1622 /* restore original instruction (kept in target endianness) */
1623 if (breakpoint->length == 4) {
1624 retval = target_write_memory(target,
1625 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1626 4, 1, breakpoint->orig_instr);
1627 if (retval != ERROR_OK)
1628 return retval;
1629 } else {
1630 retval = target_write_memory(target,
1631 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1632 2, 1, breakpoint->orig_instr);
1633 if (retval != ERROR_OK)
1634 return retval;
1635 }
1636 }
1637 breakpoint->set = 0;
1638
1639 return ERROR_OK;
1640 }
1641
1642 static int aarch64_add_breakpoint(struct target *target,
1643 struct breakpoint *breakpoint)
1644 {
1645 struct aarch64_common *aarch64 = target_to_aarch64(target);
1646
1647 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1648 LOG_INFO("no hardware breakpoint available");
1649 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1650 }
1651
1652 if (breakpoint->type == BKPT_HARD)
1653 aarch64->brp_num_available--;
1654
1655 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1656 }
1657
1658 static int aarch64_add_context_breakpoint(struct target *target,
1659 struct breakpoint *breakpoint)
1660 {
1661 struct aarch64_common *aarch64 = target_to_aarch64(target);
1662
1663 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1664 LOG_INFO("no hardware breakpoint available");
1665 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1666 }
1667
1668 if (breakpoint->type == BKPT_HARD)
1669 aarch64->brp_num_available--;
1670
1671 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1672 }
1673
1674 static int aarch64_add_hybrid_breakpoint(struct target *target,
1675 struct breakpoint *breakpoint)
1676 {
1677 struct aarch64_common *aarch64 = target_to_aarch64(target);
1678
1679 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1680 LOG_INFO("no hardware breakpoint available");
1681 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1682 }
1683
1684 if (breakpoint->type == BKPT_HARD)
1685 aarch64->brp_num_available--;
1686
1687 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1688 }
1689
1690
1691 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1692 {
1693 struct aarch64_common *aarch64 = target_to_aarch64(target);
1694
1695 #if 0
1696 /* It is perfectly possible to remove breakpoints while the target is running */
1697 if (target->state != TARGET_HALTED) {
1698 LOG_WARNING("target not halted");
1699 return ERROR_TARGET_NOT_HALTED;
1700 }
1701 #endif
1702
1703 if (breakpoint->set) {
1704 aarch64_unset_breakpoint(target, breakpoint);
1705 if (breakpoint->type == BKPT_HARD)
1706 aarch64->brp_num_available++;
1707 }
1708
1709 return ERROR_OK;
1710 }
1711
1712 /*
1713 * AArch64 reset functions
1714 */
1715
1716 static int aarch64_assert_reset(struct target *target)
1717 {
1718 struct armv8_common *armv8 = target_to_armv8(target);
1719
1720 LOG_DEBUG(" ");
1721
1722 /* FIXME when halt is requested, make it work somehow... */
1723
1724 /* Issue some kind of warm reset. */
1725 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1726 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1727 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1728 /* REVISIT handle "pulls" cases, if there's
1729 * hardware that needs them to work.
1730 */
1731 jtag_add_reset(0, 1);
1732 } else {
1733 LOG_ERROR("%s: how to reset?", target_name(target));
1734 return ERROR_FAIL;
1735 }
1736
1737 /* registers are now invalid */
1738 register_cache_invalidate(armv8->arm.core_cache);
1739
1740 target->state = TARGET_RESET;
1741
1742 return ERROR_OK;
1743 }
1744
1745 static int aarch64_deassert_reset(struct target *target)
1746 {
1747 int retval;
1748
1749 LOG_DEBUG(" ");
1750
1751 /* be certain SRST is off */
1752 jtag_add_reset(0, 0);
1753
1754 retval = aarch64_poll(target);
1755 if (retval != ERROR_OK)
1756 return retval;
1757
1758 if (target->reset_halt) {
1759 if (target->state != TARGET_HALTED) {
1760 LOG_WARNING("%s: ran after reset and before halt ...",
1761 target_name(target));
1762 retval = target_halt(target);
1763 if (retval != ERROR_OK)
1764 return retval;
1765 }
1766 }
1767
1768 return ERROR_OK;
1769 }
1770
1771 static int aarch64_write_apb_ap_memory(struct target *target,
1772 uint64_t address, uint32_t size,
1773 uint32_t count, const uint8_t *buffer)
1774 {
1775 /* write memory through APB-AP */
1776 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1777 struct armv8_common *armv8 = target_to_armv8(target);
1778 struct arm *arm = &armv8->arm;
1779 int total_bytes = count * size;
1780 int total_u32;
1781 int start_byte = address & 0x3;
1782 int end_byte = (address + total_bytes) & 0x3;
1783 struct reg *reg;
1784 uint32_t dscr;
1785 uint8_t *tmp_buff = NULL;
1786
1787 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count%" PRIu32,
1788 address, size, count);
1789 if (target->state != TARGET_HALTED) {
1790 LOG_WARNING("target not halted");
1791 return ERROR_TARGET_NOT_HALTED;
1792 }
1793
1794 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1795
1796 /* Mark registers X0 and X1 as dirty, as they will be used
1797 * for transferring the data.
1798 * They will be restored automatically when exiting
1799 * debug mode
1800 */
1801 reg = armv8_reg_current(arm, 1);
1802 reg->dirty = true;
1803
1804 reg = armv8_reg_current(arm, 0);
1805 reg->dirty = true;
1806
1807 /* clear any abort */
1808 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1809 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1810 if (retval != ERROR_OK)
1811 return retval;
1812
1813
1814 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1815
1816 /* The algorithm only copies 32 bit words, so the buffer
1817 * should be expanded to include the words at either end.
1818 * The first and last words will be read first to avoid
1819 * corruption if needed.
1820 */
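/* Example: a 5-byte write at address 0x1001 gives start_byte == 1,
 * end_byte == 2 and total_u32 == 2; the words at 0x1000 and 0x1004 are
 * read back first so their untouched bytes survive the 32-bit writes. */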
1821 tmp_buff = malloc(total_u32 * 4);
1822
1823 if ((start_byte != 0) && (total_u32 > 1)) {
1824 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1825 * the other bytes in the word.
1826 */
1827 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1828 if (retval != ERROR_OK)
1829 goto error_free_buff_w;
1830 }
1831
1832 /* If end of write is not aligned, or the write is less than 4 bytes */
1833 if ((end_byte != 0) ||
1834 ((total_u32 == 1) && (total_bytes != 4))) {
1835
1836 /* Read the last word to avoid corruption during 32 bit write */
1837 int mem_offset = (total_u32-1) * 4;
1838 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1839 if (retval != ERROR_OK)
1840 goto error_free_buff_w;
1841 }
1842
1843 /* Copy the write buffer over the top of the temporary buffer */
1844 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1845
1846 /* We now have a 32 bit aligned buffer that can be written */
1847
1848 /* Read DSCR */
1849 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1850 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1851 if (retval != ERROR_OK)
1852 goto error_free_buff_w;
1853
1854 /* Set Normal access mode */
1855 dscr = (dscr & ~DSCR_MA);
1856 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1857 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1858
1859 if (arm->core_state == ARM_STATE_AARCH64) {
1860 /* Write X0 with value 'address' using write procedure */
1861 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1862 retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
1863 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1864 retval += aarch64_exec_opcode(target,
1865 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1866 } else {
1867 /* Write R0 with value 'address' using write procedure */
1868 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1869 retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
1870 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1871 retval += aarch64_exec_opcode(target,
1872 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
1873
1874 }
1875 /* Step 1.d - Change DCC to memory mode */
1876 dscr = dscr | DSCR_MA;
1877 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1878 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1879 if (retval != ERROR_OK)
1880 goto error_unset_dtr_w;
1881
1882
1883 /* Step 2.a - Do the write */
1884 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1885 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1886 if (retval != ERROR_OK)
1887 goto error_unset_dtr_w;
1888
1889 /* Step 3.a - Switch DTR mode back to Normal mode */
1890 dscr = (dscr & ~DSCR_MA);
1891 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1892 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1893 if (retval != ERROR_OK)
1894 goto error_unset_dtr_w;
1895
1896 /* Check for sticky abort flags in the DSCR */
1897 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1898 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1899 if (retval != ERROR_OK)
1900 goto error_free_buff_w;
1901 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1902 /* Abort occurred - clear it and exit */
1903 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1904 mem_ap_write_atomic_u32(armv8->debug_ap,
1905 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1906 goto error_free_buff_w;
1907 }
1908
1909 /* Done */
1910 free(tmp_buff);
1911 return ERROR_OK;
1912
1913 error_unset_dtr_w:
1914 /* Unset DTR mode */
1915 mem_ap_read_atomic_u32(armv8->debug_ap,
1916 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1917 dscr = (dscr & ~DSCR_MA);
1918 mem_ap_write_atomic_u32(armv8->debug_ap,
1919 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1920 error_free_buff_w:
1921 LOG_ERROR("error while writing APB-AP memory");
1922 free(tmp_buff);
1923 return ERROR_FAIL;
1924 }
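
#if 0
/* Minimal sketch (not compiled, not part of the driver) of the alignment
 * math used above; "addr" and "len" are hypothetical inputs. */
static void example_apb_ap_alignment(uint64_t addr, uint32_t len)
{
	uint32_t start_byte = addr & 0x3;		/* offset into the first word */
	uint32_t end_byte = (addr + len) & 0x3;		/* offset into the last word */
	uint32_t total_u32 = DIV_ROUND_UP(start_byte + len, 4);

	/* e.g. addr = 0x80001003, len = 6:
	 *   start_byte = 3, end_byte = 1, total_u32 = 3
	 * so the head and the tail words must be read back first and the
	 * caller's bytes overlaid at tmp_buff[start_byte]. */
	(void)end_byte;
	(void)total_u32;
}
#endif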
1925
1926 static int aarch64_read_apb_ap_memory(struct target *target,
1927 target_addr_t address, uint32_t size,
1928 uint32_t count, uint8_t *buffer)
1929 {
1930 /* read memory through APB-AP */
1931 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1932 struct armv8_common *armv8 = target_to_armv8(target);
1933 struct arm *arm = &armv8->arm;
1934 int total_bytes = count * size;
1935 int total_u32;
1936 int start_byte = address & 0x3;
1937 int end_byte = (address + total_bytes) & 0x3;
1938 struct reg *reg;
1939 uint32_t dscr;
1940 uint8_t *tmp_buff = NULL;
1941 uint8_t *u8buf_ptr;
1942 uint32_t value;
1943
1944 LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count %" PRIu32,
1945 address, size, count);
1946 if (target->state != TARGET_HALTED) {
1947 LOG_WARNING("target not halted");
1948 return ERROR_TARGET_NOT_HALTED;
1949 }
1950
1951 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1952 /* Mark registers X0 and X1 as dirty, as they will be used
1953 * for transferring the data.
1954 * They will be restored automatically when exiting
1955 * debug mode
1956 */
1957 reg = armv8_reg_current(arm, 1);
1958 reg->dirty = true;
1959
1960 reg = armv8_reg_current(arm, 0);
1961 reg->dirty = true;
1962
1963 /* clear any abort */
1964 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1965 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1966 if (retval != ERROR_OK)
1967 goto error_free_buff_r;
1968
1969 /* Read DSCR */
1970 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1971 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1972
1973 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1974
1975 /* Set Normal access mode */
1976 dscr = (dscr & ~DSCR_MA);
1977 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1978 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1979
1980 if (arm->core_state == ARM_STATE_AARCH64) {
1981 /* Write X0 with value 'address' using write procedure */
1982 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1983 retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
1984 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1985 retval += aarch64_exec_opcode(target, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1986 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1987 retval += aarch64_exec_opcode(target, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1988 /* Step 1.e - Change DCC to memory mode */
1989 dscr = dscr | DSCR_MA;
1990 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1991 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1992 /* Step 1.f - read DBGDTRTX and discard the value */
1993 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1994 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1995 } else {
1996 /* Write R0 with value 'address' using write procedure */
1997 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
1998 retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
1999 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2000 retval += aarch64_exec_opcode(target,
2001 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
2002 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2003 retval += aarch64_exec_opcode(target,
2004 T32_FMTITR(ARMV4_5_MCR(14, 0, 0, 0, 5, 0)), &dscr);
2005 /* Step 1.e - Change DCC to memory mode */
2006 dscr = dscr | DSCR_MA;
2007 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
2008 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2009 /* Step 1.f - read DBGDTRTX and discard the value */
2010 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
2011 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2012
2013 }
2014 if (retval != ERROR_OK)
2015 goto error_unset_dtr_r;
2016
2017 /* Optimize the read as much as we can, either way we read in a single pass */
2018 if ((start_byte) || (end_byte)) {
2019 /* The algorithm only copies 32 bit words, so the buffer
2020 * should be expanded to include the words at either end.
2021 * The first and last words will be read into a temp buffer
2022 * to avoid corruption
2023 */
2024 tmp_buff = malloc(total_u32 * 4);
2025 if (!tmp_buff)
2026 goto error_unset_dtr_r;
2027
2028 /* use the tmp buffer to read the entire data */
2029 u8buf_ptr = tmp_buff;
2030 } else
2031 /* address and read length are aligned so read directly into the passed buffer */
2032 u8buf_ptr = buffer;
2033
2034 /* Read the data - each read of the DTRTX register causes the instruction to be reissued
2035 * Abort flags are sticky, so they can be read at the end of the transaction
2036 *
2037 * The data is read in 32-bit aligned chunks.
2038 */
2039
2040 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
2041 * increments X0 by 4. */
2042 retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
2043 armv8->debug_base + CPUV8_DBG_DTRTX);
2044 if (retval != ERROR_OK)
2045 goto error_unset_dtr_r;
2046
2047 /* Step 3.a - set DTR access mode back to Normal mode */
2048 dscr = (dscr & ~DSCR_MA);
2049 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2050 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2051 if (retval != ERROR_OK)
2052 goto error_free_buff_r;
2053
2054 /* Step 3.b - read DBGDTRTX for the final value */
2055 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2056 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2057 memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);
2058
2059 /* Check for sticky abort flags in the DSCR */
2060 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2061 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2062 if (retval != ERROR_OK)
2063 goto error_free_buff_r;
2064 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2065 /* Abort occurred - clear it and exit */
2066 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2067 mem_ap_write_atomic_u32(armv8->debug_ap,
2068 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
2069 goto error_free_buff_r;
2070 }
2071
2072 /* check if we need to copy aligned data by applying any shift necessary */
2073 if (tmp_buff) {
2074 memcpy(buffer, tmp_buff + start_byte, total_bytes);
2075 free(tmp_buff);
2076 }
2077
2078 /* Done */
2079 return ERROR_OK;
2080
2081 error_unset_dtr_r:
2082 /* Unset DTR mode */
2083 mem_ap_read_atomic_u32(armv8->debug_ap,
2084 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2085 dscr = (dscr & ~DSCR_MA);
2086 mem_ap_write_atomic_u32(armv8->debug_ap,
2087 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2088 error_free_buff_r:
2089 LOG_ERROR("error while reading APB-AP memory");
2090 free(tmp_buff);
2091 return ERROR_FAIL;
2092 }
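
#if 0
/* Condensed summary (comments only) of the memory access mode read protocol
 * implemented above, per DDI0487A.g chapter J9.1, assuming an AArch64 core:
 *
 *   write DBGDTR_EL0 = address;            // step 1.a+b
 *   exec "mrs x0, dbgdtr_el0";             // step 1.c: X0 = address
 *   exec "msr dbgdtr_el0, x0";             // step 1.d: force TXfull = 1
 *   DSCR |= DSCR_MA;                       // step 1.e: memory access mode
 *   (void)read DTRTX;                      // step 1.f: discard first value
 *   for (i = 0; i < n - 1; i++)
 *       buf[i] = read DTRTX;               // step 2.a: [X0], X0 += 4
 *   DSCR &= ~DSCR_MA;                      // step 3.a: back to normal mode
 *   buf[n - 1] = read DTRTX;               // step 3.b: final word
 */
#endif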
2093
2094 static int aarch64_read_phys_memory(struct target *target,
2095 target_addr_t address, uint32_t size,
2096 uint32_t count, uint8_t *buffer)
2097 {
2098 struct armv8_common *armv8 = target_to_armv8(target);
2099 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2100 struct adiv5_dap *swjdp = armv8->arm.dap;
2101 uint8_t apsel = swjdp->apsel;
2102 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
2103 address, size, count);
2104
2105 if (count && buffer) {
2106
2107 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2108
2109 /* read memory through AHB-AP */
2110 retval = mem_ap_read_buf(armv8->memory_ap, buffer, size, count, address);
2111 } else {
2112 /* read memory through APB-AP */
2113 retval = aarch64_mmu_modify(target, 0);
2114 if (retval != ERROR_OK)
2115 return retval;
2116 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
2117 }
2118 }
2119 return retval;
2120 }
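
#if 0
/* All memory accessors in this file share this decision: if an AHB-AP was
 * found at examine time and is the currently selected AP, access the bus
 * directly; otherwise fall back to the slower APB-AP path that runs
 * load/store instructions on the halted core. A hypothetical caller going
 * through the generic target layer:
 *
 *   uint8_t buf[16];
 *   retval = target_read_memory(target, 0x80000000, 4, 4, buf);
 */
#endif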
2121
2122 static int aarch64_read_memory(struct target *target, target_addr_t address,
2123 uint32_t size, uint32_t count, uint8_t *buffer)
2124 {
2125 int mmu_enabled = 0;
2126 target_addr_t virt, phys;
2127 int retval;
2128 struct armv8_common *armv8 = target_to_armv8(target);
2129 struct adiv5_dap *swjdp = armv8->arm.dap;
2130 uint8_t apsel = swjdp->apsel;
2131
2132 /* aarch64 handles unaligned memory access */
2133 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
2134 size, count);
2135
2136 /* determine if MMU was enabled on target stop */
2137 if (!armv8->is_armv7r) {
2138 retval = aarch64_mmu(target, &mmu_enabled);
2139 if (retval != ERROR_OK)
2140 return retval;
2141 }
2142
2143 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2144 if (mmu_enabled) {
2145 virt = address;
2146 retval = aarch64_virt2phys(target, virt, &phys);
2147 if (retval != ERROR_OK)
2148 return retval;
2149
2150 LOG_DEBUG("Reading at virtual address. Translating v:0x%" TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR,
2151 virt, phys);
2152 address = phys;
2153 }
2154 retval = aarch64_read_phys_memory(target, address, size, count,
2155 buffer);
2156 } else {
2157 if (mmu_enabled) {
2158 retval = aarch64_check_address(target, address);
2159 if (retval != ERROR_OK)
2160 return retval;
2161 /* enable MMU as we could have disabled it for phys
2162 access */
2163 retval = aarch64_mmu_modify(target, 1);
2164 if (retval != ERROR_OK)
2165 return retval;
2166 }
2167 retval = aarch64_read_apb_ap_memory(target, address, size,
2168 count, buffer);
2169 }
2170 return retval;
2171 }
2172
2173 static int aarch64_write_phys_memory(struct target *target,
2174 target_addr_t address, uint32_t size,
2175 uint32_t count, const uint8_t *buffer)
2176 {
2177 struct armv8_common *armv8 = target_to_armv8(target);
2178 struct adiv5_dap *swjdp = armv8->arm.dap;
2179 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2180 uint8_t apsel = swjdp->apsel;
2181
2182 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
2183 size, count);
2184
2185 if (count && buffer) {
2186
2187 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2188
2189 /* write memory through AHB-AP */
2190 retval = mem_ap_write_buf(armv8->memory_ap, buffer, size, count, address);
2191 } else {
2192
2193 /* write memory through APB-AP */
2194 if (!armv8->is_armv7r) {
2195 retval = aarch64_mmu_modify(target, 0);
2196 if (retval != ERROR_OK)
2197 return retval;
2198 }
2199 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2200 }
2201 }
2202
2203
2204 /* REVISIT this op is generic ARMv7-A/R stuff */
2205 if (retval == ERROR_OK && target->state == TARGET_HALTED) {
2206 struct arm_dpm *dpm = armv8->arm.dpm;
2207
2208 retval = dpm->prepare(dpm);
2209 if (retval != ERROR_OK)
2210 return retval;
2211
2212 /* The Cache handling will NOT work with MMU active, the
2213 * wrong addresses will be invalidated!
2214 *
2215 * For both ICache and DCache, walk all cache lines in the
2216 * address range. This code assumes a fixed 64 byte line length.
2217 *
2218 * REVISIT per ARMv7, these may trigger watchpoints ...
2219 */
2220
2221 /* invalidate I-Cache */
2222 if (armv8->armv8_mmu.armv8_cache.i_cache_enabled) {
2223 /* ICIMVAU - Invalidate Cache single entry
2224 * with MVA to PoU
2225 * MCR p15, 0, r0, c7, c5, 1
2226 */
2227 for (uint32_t cacheline = address & ~0x3f;
2228 cacheline < address + size * count;
2229 cacheline += 64) {
2230 retval = dpm->instr_write_data_r0(dpm,
2231 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2232 cacheline);
2233 if (retval != ERROR_OK)
2234 return retval;
2235 }
2236 }
2237
2238 /* invalidate D-Cache */
2239 if (armv8->armv8_mmu.armv8_cache.d_u_cache_enabled) {
2240 /* DCIMVAC - Invalidate data Cache line
2241 * with MVA to PoC
2242 * MCR p15, 0, r0, c7, c6, 1
2243 */
2244 for (uint32_t cacheline = address & ~0x3f;
2245 cacheline < address + size * count;
2246 cacheline += 64) {
2247 retval = dpm->instr_write_data_r0(dpm,
2248 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2249 cacheline);
2250 if (retval != ERROR_OK)
2251 return retval;
2252 }
2253 }
2254
2255 /* (void) */ dpm->finish(dpm);
2256 }
2257
2258 return retval;
2259 }
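
#if 0
/* Worked example of the cache line walk above (hypothetical values): a
 * 10-byte write at 0x80000ff8 spans two 64-byte lines, 0x80000fc0 and
 * 0x80001000. With the start address aligned down, the loop issues
 * maintenance for cacheline = 0x80000fc0 and 0x80001000, then stops at
 * 0x80001040 >= 0x80001002. */
#endif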
2260
2261 static int aarch64_write_memory(struct target *target, target_addr_t address,
2262 uint32_t size, uint32_t count, const uint8_t *buffer)
2263 {
2264 int mmu_enabled = 0;
2265 target_addr_t virt, phys;
2266 int retval;
2267 struct armv8_common *armv8 = target_to_armv8(target);
2268 struct adiv5_dap *swjdp = armv8->arm.dap;
2269 uint8_t apsel = swjdp->apsel;
2270
2271 /* aarch64 handles unaligned memory access */
2272 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
2273 "; count %" PRId32, address, size, count);
2274
2275 /* determine if MMU was enabled on target stop */
2276 if (!armv8->is_armv7r) {
2277 retval = aarch64_mmu(target, &mmu_enabled);
2278 if (retval != ERROR_OK)
2279 return retval;
2280 }
2281
2282 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2283 LOG_DEBUG("Writing memory to address 0x%" TARGET_PRIxADDR "; size %"
2284 PRId32 "; count %" PRId32, address, size, count);
2285 if (mmu_enabled) {
2286 virt = address;
2287 retval = aarch64_virt2phys(target, virt, &phys);
2288 if (retval != ERROR_OK)
2289 return retval;
2290
2291 LOG_DEBUG("Writing to virtual address. Translating v:0x%"
2292 TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR, virt, phys);
2293 address = phys;
2294 }
2295 retval = aarch64_write_phys_memory(target, address, size,
2296 count, buffer);
2297 } else {
2298 if (mmu_enabled) {
2299 retval = aarch64_check_address(target, address);
2300 if (retval != ERROR_OK)
2301 return retval;
2302 /* enable MMU as we could have disabled it for phys access */
2303 retval = aarch64_mmu_modify(target, 1);
2304 if (retval != ERROR_OK)
2305 return retval;
2306 }
2307 retval = aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2308 }
2309 return retval;
2310 }
2311
2312 static int aarch64_handle_target_request(void *priv)
2313 {
2314 struct target *target = priv;
2315 struct armv8_common *armv8 = target_to_armv8(target);
2316 int retval;
2317
2318 if (!target_was_examined(target))
2319 return ERROR_OK;
2320 if (!target->dbg_msg_enabled)
2321 return ERROR_OK;
2322
2323 if (target->state == TARGET_RUNNING) {
2324 uint32_t request;
2325 uint32_t dscr;
2326 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2327 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2328
2329 /* check if we have data */
2330 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2331 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2332 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2333 if (retval == ERROR_OK) {
2334 target_request(target, request);
2335 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2336 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2337 }
2338 }
2339 }
2340
2341 return ERROR_OK;
2342 }
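
#if 0
/* Hypothetical target-side counterpart (bare-metal AArch64, illustration
 * only): firmware pushes a request word into the DCC transmit register,
 * which the handler above drains via DSCR_DTR_TX_FULL / CPUV8_DBG_DTRTX.
 */
static void example_dcc_write(uint32_t request)
{
	uint64_t mdscr;

	do {	/* spin until the previous word was drained (TXfull, bit 29) */
		asm volatile("mrs %0, mdscr_el1" : "=r" (mdscr));
	} while (mdscr & (1ul << 29));
	asm volatile("msr dbgdtrtx_el0, %0" : : "r" ((uint64_t)request));
}
#endif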
2343
2344 static int aarch64_examine_first(struct target *target)
2345 {
2346 struct aarch64_common *aarch64 = target_to_aarch64(target);
2347 struct armv8_common *armv8 = &aarch64->armv8_common;
2348 struct adiv5_dap *swjdp = armv8->arm.dap;
2349 int retval = ERROR_OK;
2350 uint32_t pfr, debug, ctypr, ttypr, cpuid;
2351 int i;
2352
2353 /* Make sure the DAP and its debug port are initialized
2354 * before we touch any debug registers
2355 */
2356 retval = dap_dp_init(swjdp);
2357 if (retval != ERROR_OK)
2358 return retval;
2359
2360 /* Search for the APB-AP - it is needed for access to debug registers */
2361 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2362 if (retval != ERROR_OK) {
2363 LOG_ERROR("Could not find APB-AP for debug access");
2364 return retval;
2365 }
2366
2367 retval = mem_ap_init(armv8->debug_ap);
2368 if (retval != ERROR_OK) {
2369 LOG_ERROR("Could not initialize the APB-AP");
2370 return retval;
2371 }
2372
2373 armv8->debug_ap->memaccess_tck = 80;
2374
2375 /* Search for the AHB-AP - it allows direct memory access if available */
2376 armv8->memory_ap_available = false;
2377 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv8->memory_ap);
2378 if (retval == ERROR_OK) {
2379 retval = mem_ap_init(armv8->memory_ap);
2380 if (retval == ERROR_OK)
2381 armv8->memory_ap_available = true;
2382 }
2383 if (retval != ERROR_OK) {
2384 /* AHB-AP not found or unavailable - use the CPU */
2385 LOG_DEBUG("No AHB-AP available for memory access");
2386 }
2387
2388
2389 if (!target->dbgbase_set) {
2390 uint32_t dbgbase;
2391 /* Get ROM Table base */
2392 uint32_t apid;
2393 int32_t coreidx = target->coreid;
2394 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2395 if (retval != ERROR_OK)
2396 return retval;
2397 /* Lookup 0x15 -- Processor DAP */
2398 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2399 &armv8->debug_base, &coreidx);
2400 if (retval != ERROR_OK)
2401 return retval;
2402 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2403 coreidx, armv8->debug_base);
2404 } else
2405 armv8->debug_base = target->dbgbase;
2406
2407 LOG_DEBUG("Target ctibase is 0x%x", target->ctibase);
2408 if (target->ctibase == 0)
2409 armv8->cti_base = target->ctibase = armv8->debug_base + 0x1000;
2410 else
2411 armv8->cti_base = target->ctibase;
2412
2413 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2414 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
2415 if (retval != ERROR_OK) {
2416 LOG_DEBUG("Examine %s failed", "oslock");
2417 return retval;
2418 }
2419
2420 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2421 armv8->debug_base + 0x88, &cpuid);
2422 LOG_DEBUG("0x88 = %x", cpuid);
2423
2424 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2425 armv8->debug_base + 0x314, &cpuid);
2426 LOG_DEBUG("0x314 = %x", cpuid);
2427
2428 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2429 armv8->debug_base + 0x310, &cpuid);
2430 LOG_DEBUG("0x310 = %x", cpuid);
2431 if (retval != ERROR_OK)
2432 return retval;
2433
2434 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2435 armv8->debug_base + CPUDBG_CPUID, &cpuid);
2436 if (retval != ERROR_OK) {
2437 LOG_DEBUG("Examine %s failed", "CPUID");
2438 return retval;
2439 }
2440
2441 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2442 armv8->debug_base + CPUDBG_CTYPR, &ctypr);
2443 if (retval != ERROR_OK) {
2444 LOG_DEBUG("Examine %s failed", "CTYPR");
2445 return retval;
2446 }
2447
2448 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2449 armv8->debug_base + CPUDBG_TTYPR, &ttypr);
2450 if (retval != ERROR_OK) {
2451 LOG_DEBUG("Examine %s failed", "TTYPR");
2452 return retval;
2453 }
2454
2455 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2456 armv8->debug_base + ID_AA64PFR0_EL1, &pfr);
2457 if (retval != ERROR_OK) {
2458 LOG_DEBUG("Examine %s failed", "ID_AA64PFR0_EL1");
2459 return retval;
2460 }
2461 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2462 armv8->debug_base + ID_AA64DFR0_EL1, &debug);
2463 if (retval != ERROR_OK) {
2464 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2465 return retval;
2466 }
2467
2468 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2469 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2470 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2471 LOG_DEBUG("ID_AA64PFR0_EL1 = 0x%08" PRIx32, pfr);
2472 LOG_DEBUG("ID_AA64DFR0_EL1 = 0x%08" PRIx32, debug);
2473
2474 armv8->arm.core_type = ARM_MODE_MON;
2475 armv8->arm.core_state = ARM_STATE_AARCH64;
2476 retval = aarch64_dpm_setup(aarch64, debug);
2477 if (retval != ERROR_OK)
2478 return retval;
2479
2480 /* Setup Breakpoint Register Pairs */
2481 aarch64->brp_num = ((debug >> 12) & 0x0F) + 1;
2482 aarch64->brp_num_context = ((debug >> 28) & 0x0F) + 1;
2483
2484 /* hack - no context bpt support yet */
2485 aarch64->brp_num_context = 0;
2486
2487 aarch64->brp_num_available = aarch64->brp_num;
2488 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2489 for (i = 0; i < aarch64->brp_num; i++) {
2490 aarch64->brp_list[i].used = 0;
2491 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2492 aarch64->brp_list[i].type = BRP_NORMAL;
2493 else
2494 aarch64->brp_list[i].type = BRP_CONTEXT;
2495 aarch64->brp_list[i].value = 0;
2496 aarch64->brp_list[i].control = 0;
2497 aarch64->brp_list[i].BRPn = i;
2498 }
2499
2500 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2501
2502 target_set_examined(target);
2503 return ERROR_OK;
2504 }
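
#if 0
/* Field layout used above (low word of ID_AA64DFR0_EL1): BRPs in bits
 * [15:12] and CTX_CMPs in bits [31:28], both encoded as "count minus one".
 * A hypothetical value 0x10305106 would decode as:
 *
 *   brp_num         = ((0x10305106 >> 12) & 0x0F) + 1;   // 5 + 1 = 6
 *   brp_num_context = ((0x10305106 >> 28) & 0x0F) + 1;   // 1 + 1 = 2
 */
#endif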
2505
2506 static int aarch64_examine(struct target *target)
2507 {
2508 int retval = ERROR_OK;
2509
2510 /* don't re-probe hardware after each reset */
2511 if (!target_was_examined(target))
2512 retval = aarch64_examine_first(target);
2513
2514 /* Configure core debug access */
2515 if (retval == ERROR_OK)
2516 retval = aarch64_init_debug_access(target);
2517
2518 return retval;
2519 }
2520
2521 /*
2522 * aarch64 target creation and initialization
2523 */
2524
2525 static int aarch64_init_target(struct command_context *cmd_ctx,
2526 struct target *target)
2527 {
2528 /* examine_first() does a bunch of this */
2529 return ERROR_OK;
2530 }
2531
2532 static int aarch64_init_arch_info(struct target *target,
2533 struct aarch64_common *aarch64, struct jtag_tap *tap)
2534 {
2535 struct armv8_common *armv8 = &aarch64->armv8_common;
2539
2540 /* Setup struct aarch64_common */
2541 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2542 /* if the tap has no dap yet, initialize it */
2543 if (!tap->dap) {
2544 tap->dap = dap_init();
2545
2546 /* Leave (only) generic DAP stuff for debugport_init() */
2547 tap->dap->tap = tap;
2548 }
2549
2550 armv8->arm.dap = tap->dap;
2551
2552 aarch64->fast_reg_read = 0;
2553
2554 /* register arch-specific functions */
2555 armv8->examine_debug_reason = NULL;
2556
2557 armv8->post_debug_entry = aarch64_post_debug_entry;
2558
2559 armv8->pre_restore_context = NULL;
2560
2561 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2562
2563 /* REVISIT v7a setup should be in a v7a-specific routine */
2564 armv8_init_arch_info(target, armv8);
2565 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2566
2567 return ERROR_OK;
2568 }
2569
2570 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2571 {
2572 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
if (aarch64 == NULL)
return ERROR_FAIL;

2574 aarch64->armv8_common.is_armv7r = false;
2575
2576 return aarch64_init_arch_info(target, aarch64, target->tap);
2577 }
2578
2579 static int aarch64_mmu(struct target *target, int *enabled)
2580 {
2581 if (target->state != TARGET_HALTED) {
2582 LOG_ERROR("%s: target not halted", __func__);
2583 return ERROR_TARGET_INVALID;
2584 }
2585
2586 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2587 return ERROR_OK;
2588 }
2589
2590 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2591 target_addr_t *phys)
2592 {
2593 int retval = ERROR_FAIL;
2594 struct armv8_common *armv8 = target_to_armv8(target);
2595 struct adiv5_dap *swjdp = armv8->arm.dap;
2596 uint8_t apsel = swjdp->apsel;
2597 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2598 uint32_t ret;
2599 retval = armv8_mmu_translate_va(target,
2600 virt, &ret);
2601 if (retval != ERROR_OK)
2602 goto done;
2603 *phys = ret;
2604 } else {/* use this method if armv8->memory_ap is not selected;
2605 * the MMU must be enabled in order to get a correct translation */
2606 retval = aarch64_mmu_modify(target, 1);
2607 if (retval != ERROR_OK)
2608 goto done;
2609 retval = armv8_mmu_translate_va_pa(target, virt, phys, 1);
2610 }
2611 done:
2612 return retval;
2613 }
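
#if 0
/* Hypothetical use of the translation path: resolve a virtual address the
 * same way aarch64_read_memory() does before a physical access.
 *
 *   target_addr_t phys;
 *   retval = aarch64_virt2phys(target, 0xffff000008080000ull, &phys);
 *   if (retval == ERROR_OK)
 *       LOG_DEBUG("translated to 0x%" TARGET_PRIxADDR, phys);
 */
#endif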
2614
2615 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2616 {
2617 struct target *target = get_current_target(CMD_CTX);
2618 struct armv8_common *armv8 = target_to_armv8(target);
2619
2620 return armv8_handle_cache_info_command(CMD_CTX,
2621 &armv8->armv8_mmu.armv8_cache);
2622 }
2623
2624
2625 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2626 {
2627 struct target *target = get_current_target(CMD_CTX);
2628 if (!target_was_examined(target)) {
2629 LOG_ERROR("target not examined yet");
2630 return ERROR_FAIL;
2631 }
2632
2633 return aarch64_init_debug_access(target);
2634 }
2635 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2636 {
2637 struct target *target = get_current_target(CMD_CTX);
2638 /* check target is an smp target */
2639 struct target_list *head;
2640 struct target *curr;
2641 head = target->head;
2642 target->smp = 0;
2643 if (head != (struct target_list *)NULL) {
2644 while (head != (struct target_list *)NULL) {
2645 curr = head->target;
2646 curr->smp = 0;
2647 head = head->next;
2648 }
2649 /* fixes the target display to the debugger */
2650 target->gdb_service->target = target;
2651 }
2652 return ERROR_OK;
2653 }
2654
2655 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2656 {
2657 struct target *target = get_current_target(CMD_CTX);
2658 struct target_list *head;
2659 struct target *curr;
2660 head = target->head;
2661 if (head != (struct target_list *)NULL) {
2662 target->smp = 1;
2663 while (head != (struct target_list *)NULL) {
2664 curr = head->target;
2665 curr->smp = 1;
2666 head = head->next;
2667 }
2668 }
2669 return ERROR_OK;
2670 }
2671
2672 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2673 {
2674 struct target *target = get_current_target(CMD_CTX);
2675 int retval = ERROR_OK;
2676 struct target_list *head;
2677 head = target->head;
2678 if (head != (struct target_list *)NULL) {
2679 if (CMD_ARGC == 1) {
2680 int coreid = 0;
2681 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2682 if (ERROR_OK != retval)
2683 return retval;
2684 target->gdb_service->core[1] = coreid;
2685
2686 }
2687 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32,
2688 target->gdb_service->core[0], target->gdb_service->core[1]);
2689 }
2690 return ERROR_OK;
2691 }
2692
2693 static const struct command_registration aarch64_exec_command_handlers[] = {
2694 {
2695 .name = "cache_info",
2696 .handler = aarch64_handle_cache_info_command,
2697 .mode = COMMAND_EXEC,
2698 .help = "display information about target caches",
2699 .usage = "",
2700 },
2701 {
2702 .name = "dbginit",
2703 .handler = aarch64_handle_dbginit_command,
2704 .mode = COMMAND_EXEC,
2705 .help = "Initialize core debug",
2706 .usage = "",
2707 },
2708 {
.name = "smp_off",
2709 .handler = aarch64_handle_smp_off_command,
2710 .mode = COMMAND_EXEC,
2711 .help = "Stop smp handling",
2712 .usage = "",
2713 },
2714 {
2715 .name = "smp_on",
2716 .handler = aarch64_handle_smp_on_command,
2717 .mode = COMMAND_EXEC,
2718 .help = "Restart smp handling",
2719 .usage = "",
2720 },
2721 {
2722 .name = "smp_gdb",
2723 .handler = aarch64_handle_smp_gdb_command,
2724 .mode = COMMAND_EXEC,
2725 .help = "display/set the current core reported to gdb",
2726 .usage = "",
2727 },
2730 COMMAND_REGISTRATION_DONE
2731 };
2732 static const struct command_registration aarch64_command_handlers[] = {
2733 {
2734 .chain = arm_command_handlers,
2735 },
2736 {
2737 .chain = armv8_command_handlers,
2738 },
2739 {
2740 .name = "aarch64",
2741 .mode = COMMAND_ANY,
2742 .help = "Aarch64 command group",
2743 .usage = "",
2744 .chain = aarch64_exec_command_handlers,
2745 },
2746 COMMAND_REGISTRATION_DONE
2747 };
2748
2749 struct target_type aarch64_target = {
2750 .name = "aarch64",
2751
2752 .poll = aarch64_poll,
2753 .arch_state = armv8_arch_state,
2754
2755 .halt = aarch64_halt,
2756 .resume = aarch64_resume,
2757 .step = aarch64_step,
2758
2759 .assert_reset = aarch64_assert_reset,
2760 .deassert_reset = aarch64_deassert_reset,
2761
2762 /* REVISIT allow exporting VFP3 registers ... */
2763 .get_gdb_reg_list = armv8_get_gdb_reg_list,
2764
2765 .read_memory = aarch64_read_memory,
2766 .write_memory = aarch64_write_memory,
2767
2768 .checksum_memory = arm_checksum_memory,
2769 .blank_check_memory = arm_blank_check_memory,
2770
2771 .run_algorithm = armv4_5_run_algorithm,
2772
2773 .add_breakpoint = aarch64_add_breakpoint,
2774 .add_context_breakpoint = aarch64_add_context_breakpoint,
2775 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2776 .remove_breakpoint = aarch64_remove_breakpoint,
2777 .add_watchpoint = NULL,
2778 .remove_watchpoint = NULL,
2779
2780 .commands = aarch64_command_handlers,
2781 .target_create = aarch64_target_create,
2782 .init_target = aarch64_init_target,
2783 .examine = aarch64_examine,
2784
2785 .read_phys_memory = aarch64_read_phys_memory,
2786 .write_phys_memory = aarch64_write_phys_memory,
2787 .mmu = aarch64_mmu,
2788 .virt2phys = aarch64_virt2phys,
2789 };
