aarch64: correct breakpoint register offset
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include <helper/time_support.h>
31
32 static int aarch64_poll(struct target *target);
33 static int aarch64_debug_entry(struct target *target);
34 static int aarch64_restore_context(struct target *target, bool bpwp);
35 static int aarch64_set_breakpoint(struct target *target,
36 struct breakpoint *breakpoint, uint8_t matchmode);
37 static int aarch64_set_context_breakpoint(struct target *target,
38 struct breakpoint *breakpoint, uint8_t matchmode);
39 static int aarch64_set_hybrid_breakpoint(struct target *target,
40 struct breakpoint *breakpoint);
41 static int aarch64_unset_breakpoint(struct target *target,
42 struct breakpoint *breakpoint);
43 static int aarch64_mmu(struct target *target, int *enabled);
44 static int aarch64_virt2phys(struct target *target,
45 target_addr_t virt, target_addr_t *phys);
46 static int aarch64_read_apb_ap_memory(struct target *target,
47 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
48 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
49 uint32_t opcode, uint32_t data);
50
51 static int aarch64_restore_system_control_reg(struct target *target)
52 {
53 int retval = ERROR_OK;
54
55 struct aarch64_common *aarch64 = target_to_aarch64(target);
56 struct armv8_common *armv8 = target_to_armv8(target);
57
58 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
59 aarch64->system_control_reg_curr = aarch64->system_control_reg;
60 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
61
62 switch (armv8->arm.core_mode) {
63 case ARMV8_64_EL0T:
64 case ARMV8_64_EL1T:
65 case ARMV8_64_EL1H:
66 retval = armv8->arm.msr(target, 3, /*op 0*/
67 0, 1, /* op1, op2 */
68 0, 0, /* CRn, CRm */
69 aarch64->system_control_reg);
70 if (retval != ERROR_OK)
71 return retval;
72 break;
73 case ARMV8_64_EL2T:
74 case ARMV8_64_EL2H:
75 retval = armv8->arm.msr(target, 3, /*op 0*/
76 4, 1, /* op1, op2 */
77 0, 0, /* CRn, CRm */
78 aarch64->system_control_reg);
79 if (retval != ERROR_OK)
80 return retval;
81 break;
82 case ARMV8_64_EL3H:
83 case ARMV8_64_EL3T:
84 retval = armv8->arm.msr(target, 3, /*op 0*/
85 6, 1, /* op1, op2 */
86 0, 0, /* CRn, CRm */
87 aarch64->system_control_reg);
88 if (retval != ERROR_OK)
89 return retval;
90 break;
91 default:
92 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
93 }
94 }
95 return retval;
96 }
97
/* Validate an address before an aarch64 APB read/write with the MMU on,
 * so a predictable data abort on the APB can be avoided.
 *
 * Currently a stub: every address is reported as valid. */
static int aarch64_check_address(struct target *target, uint32_t address)
{
	/* TODO: not implemented yet - unconditionally accepts the address */
	return ERROR_OK;
}
105 /* modify system_control_reg in order to enable or disable mmu for :
106 * - virt2phys address conversion
107 * - read or write memory in phys or virt address */
108 static int aarch64_mmu_modify(struct target *target, int enable)
109 {
110 struct aarch64_common *aarch64 = target_to_aarch64(target);
111 struct armv8_common *armv8 = &aarch64->armv8_common;
112 int retval = ERROR_OK;
113
114 if (enable) {
115 /* if mmu enabled at target stop and mmu not enable */
116 if (!(aarch64->system_control_reg & 0x1U)) {
117 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
118 return ERROR_FAIL;
119 }
120 if (!(aarch64->system_control_reg_curr & 0x1U)) {
121 aarch64->system_control_reg_curr |= 0x1U;
122 switch (armv8->arm.core_mode) {
123 case ARMV8_64_EL0T:
124 case ARMV8_64_EL1T:
125 case ARMV8_64_EL1H:
126 retval = armv8->arm.msr(target, 3, /*op 0*/
127 0, 0, /* op1, op2 */
128 1, 0, /* CRn, CRm */
129 aarch64->system_control_reg_curr);
130 if (retval != ERROR_OK)
131 return retval;
132 break;
133 case ARMV8_64_EL2T:
134 case ARMV8_64_EL2H:
135 retval = armv8->arm.msr(target, 3, /*op 0*/
136 4, 0, /* op1, op2 */
137 1, 0, /* CRn, CRm */
138 aarch64->system_control_reg_curr);
139 if (retval != ERROR_OK)
140 return retval;
141 break;
142 case ARMV8_64_EL3H:
143 case ARMV8_64_EL3T:
144 retval = armv8->arm.msr(target, 3, /*op 0*/
145 6, 0, /* op1, op2 */
146 1, 0, /* CRn, CRm */
147 aarch64->system_control_reg_curr);
148 if (retval != ERROR_OK)
149 return retval;
150 break;
151 default:
152 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
153 }
154 }
155 } else {
156 if (aarch64->system_control_reg_curr & 0x4U) {
157 /* data cache is active */
158 aarch64->system_control_reg_curr &= ~0x4U;
159 /* flush data cache armv7 function to be called */
160 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
161 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
162 }
163 if ((aarch64->system_control_reg_curr & 0x1U)) {
164 aarch64->system_control_reg_curr &= ~0x1U;
165 switch (armv8->arm.core_mode) {
166 case ARMV8_64_EL0T:
167 case ARMV8_64_EL1T:
168 case ARMV8_64_EL1H:
169 retval = armv8->arm.msr(target, 3, /*op 0*/
170 0, 0, /* op1, op2 */
171 1, 0, /* CRn, CRm */
172 aarch64->system_control_reg_curr);
173 if (retval != ERROR_OK)
174 return retval;
175 break;
176 case ARMV8_64_EL2T:
177 case ARMV8_64_EL2H:
178 retval = armv8->arm.msr(target, 3, /*op 0*/
179 4, 0, /* op1, op2 */
180 1, 0, /* CRn, CRm */
181 aarch64->system_control_reg_curr);
182 if (retval != ERROR_OK)
183 return retval;
184 break;
185 case ARMV8_64_EL3H:
186 case ARMV8_64_EL3T:
187 retval = armv8->arm.msr(target, 3, /*op 0*/
188 6, 0, /* op1, op2 */
189 1, 0, /* CRn, CRm */
190 aarch64->system_control_reg_curr);
191 if (retval != ERROR_OK)
192 return retval;
193 break;
194 default:
195 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
196 break;
197 }
198 }
199 }
200 return retval;
201 }
202
203 /*
204 * Basic debug access, very low level assumes state is saved
205 */
206 static int aarch64_init_debug_access(struct target *target)
207 {
208 struct armv8_common *armv8 = target_to_armv8(target);
209 int retval;
210 uint32_t dummy;
211
212 LOG_DEBUG(" ");
213
214 /* Unlocking the debug registers for modification
215 * The debugport might be uninitialised so try twice */
216 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
217 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
218 if (retval != ERROR_OK) {
219 /* try again */
220 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
221 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
222 if (retval == ERROR_OK)
223 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
224 }
225 if (retval != ERROR_OK)
226 return retval;
227 /* Clear Sticky Power Down status Bit in PRSR to enable access to
228 the registers in the Core Power Domain */
229 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
230 armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
231 if (retval != ERROR_OK)
232 return retval;
233
234 /* Enabling of instruction execution in debug mode is done in debug_entry code */
235
236 /* Resync breakpoint registers */
237
238 /* Since this is likely called from init or reset, update target state information*/
239 return aarch64_poll(target);
240 }
241
/* Execute a single instruction through the ITR and wait for completion.
 *
 * To reduce needless round-trips, pass in a pointer to the current
 * DSCR value. Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_ITE if you
 * happen to know that no instruction is pending.
 *
 * On success *dscr_p (if given) holds the DSCR value observed after
 * the instruction completed, with DSCR_ITE set.
 */
static int aarch64_exec_opcode(struct target *target,
	uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv8_common *armv8 = target_to_armv8(target);
	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	long long then = timeval_ms();
	while ((dscr & DSCR_ITE) == 0) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
			return ERROR_FAIL;
		}
	}

	/* writing ITR triggers execution of the queued opcode */
	retval = mem_ap_write_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	/* second wait: the do/while forces at least one fresh DSCR read,
	 * since the cached value predates the ITR write */
	then = timeval_ms();
	do {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
			return ERROR_FAIL;
		}
	} while ((dscr & DSCR_ITE) == 0);	/* Wait for InstrCompl bit to be set */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
296
297 /* Write to memory mapped registers directly with no cache or mmu handling */
298 static int aarch64_dap_write_memap_register_u32(struct target *target,
299 uint32_t address,
300 uint32_t value)
301 {
302 int retval;
303 struct armv8_common *armv8 = target_to_armv8(target);
304
305 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
306
307 return retval;
308 }
309
310 /*
311 * AARCH64 implementation of Debug Programmer's Model
312 *
313 * NOTE the invariant: these routines return with DSCR_ITE set,
314 * so there's no need to poll for it before executing an instruction.
315 *
316 * NOTE that in several of these cases the "stall" mode might be useful.
317 * It'd let us queue a few operations together... prepare/finish might
318 * be the places to enable/disable that mode.
319 */
320
/* Recover the enclosing aarch64_common from its embedded arm_dpm. */
static inline struct aarch64_common *dpm_to_a8(struct arm_dpm *dpm)
{
	return container_of(dpm, struct aarch64_common, armv8_common.dpm);
}
325
326 static int aarch64_write_dcc(struct armv8_common *armv8, uint32_t data)
327 {
328 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
329 return mem_ap_write_u32(armv8->debug_ap,
330 armv8->debug_base + CPUV8_DBG_DTRRX, data);
331 }
332
333 static int aarch64_write_dcc_64(struct armv8_common *armv8, uint64_t data)
334 {
335 int ret;
336 LOG_DEBUG("write DCC Low word0x%08" PRIx32, (unsigned)data);
337 LOG_DEBUG("write DCC High word 0x%08" PRIx32, (unsigned)(data >> 32));
338 ret = mem_ap_write_u32(armv8->debug_ap,
339 armv8->debug_base + CPUV8_DBG_DTRRX, data);
340 ret += mem_ap_write_u32(armv8->debug_ap,
341 armv8->debug_base + CPUV8_DBG_DTRTX, data >> 32);
342 return ret;
343 }
344
/* Pull one 32-bit word out of the DCC (DTRTX from the debugger's side).
 *
 * dscr_p, if non-NULL, carries the cached DSCR in and the last observed
 * DSCR out; pass NULL to start from the "transfer pending" assumption. */
static int aarch64_read_dcc(struct armv8_common *armv8, uint32_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_ITE;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	/* reading DTRTX drains the core-to-debugger channel */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX,
			data);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("read DCC 0x%08" PRIx32, *data);

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
380
381 static int aarch64_read_dcc_64(struct armv8_common *armv8, uint64_t *data,
382 uint32_t *dscr_p)
383 {
384 uint32_t dscr = DSCR_ITE;
385 uint32_t higher;
386 int retval;
387
388 if (dscr_p)
389 dscr = *dscr_p;
390
391 /* Wait for DTRRXfull */
392 long long then = timeval_ms();
393 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
394 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
395 armv8->debug_base + CPUV8_DBG_DSCR,
396 &dscr);
397 if (retval != ERROR_OK)
398 return retval;
399 if (timeval_ms() > then + 1000) {
400 LOG_ERROR("Timeout waiting for read dcc");
401 return ERROR_FAIL;
402 }
403 }
404
405 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
406 armv8->debug_base + CPUV8_DBG_DTRTX,
407 (uint32_t *)data);
408 if (retval != ERROR_OK)
409 return retval;
410
411 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
412 armv8->debug_base + CPUV8_DBG_DTRRX,
413 &higher);
414 if (retval != ERROR_OK)
415 return retval;
416
417 *data = *(uint32_t *)data | (uint64_t)higher << 32;
418 LOG_DEBUG("read DCC 0x%16.16" PRIx64, *data);
419
420 if (dscr_p)
421 *dscr_p = dscr;
422
423 return retval;
424 }
425
/* Prepare the DPM for a batch of operations: wait for the ITR to be
 * empty (DSCR_ITE), and recover from a stale DTRRX-full condition by
 * draining the register and clearing the sticky error. */
static int aarch64_dpm_prepare(struct arm_dpm *dpm)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after ever DPM operation */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUV8_DBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_ITE) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by reading it; the value is discarded (dscr
		 * is reused as a scratch buffer here) */
		retval = mem_ap_read_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUV8_DBG_DTRRX, &dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Clear sticky error */
		retval = mem_ap_write_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
466
/* Counterpart of aarch64_dpm_prepare; currently nothing to undo. */
static int aarch64_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
472
473 static int aarch64_instr_execute(struct arm_dpm *dpm,
474 uint32_t opcode)
475 {
476 struct aarch64_common *a8 = dpm_to_a8(dpm);
477 uint32_t dscr = DSCR_ITE;
478
479 return aarch64_exec_opcode(
480 a8->armv8_common.arm.target,
481 opcode,
482 &dscr);
483 }
484
485 static int aarch64_instr_write_data_dcc(struct arm_dpm *dpm,
486 uint32_t opcode, uint32_t data)
487 {
488 struct aarch64_common *a8 = dpm_to_a8(dpm);
489 int retval;
490 uint32_t dscr = DSCR_ITE;
491
492 retval = aarch64_write_dcc(&a8->armv8_common, data);
493 if (retval != ERROR_OK)
494 return retval;
495
496 return aarch64_exec_opcode(
497 a8->armv8_common.arm.target,
498 opcode,
499 &dscr);
500 }
501
502 static int aarch64_instr_write_data_dcc_64(struct arm_dpm *dpm,
503 uint32_t opcode, uint64_t data)
504 {
505 struct aarch64_common *a8 = dpm_to_a8(dpm);
506 int retval;
507 uint32_t dscr = DSCR_ITE;
508
509 retval = aarch64_write_dcc_64(&a8->armv8_common, data);
510 if (retval != ERROR_OK)
511 return retval;
512
513 return aarch64_exec_opcode(
514 a8->armv8_common.arm.target,
515 opcode,
516 &dscr);
517 }
518
519 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
520 uint32_t opcode, uint32_t data)
521 {
522 struct aarch64_common *a8 = dpm_to_a8(dpm);
523 uint32_t dscr = DSCR_ITE;
524 int retval;
525
526 retval = aarch64_write_dcc(&a8->armv8_common, data);
527 if (retval != ERROR_OK)
528 return retval;
529
530 retval = aarch64_exec_opcode(
531 a8->armv8_common.arm.target,
532 ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 0),
533 &dscr);
534 if (retval != ERROR_OK)
535 return retval;
536
537 /* then the opcode, taking data from R0 */
538 retval = aarch64_exec_opcode(
539 a8->armv8_common.arm.target,
540 opcode,
541 &dscr);
542
543 return retval;
544 }
545
546 static int aarch64_instr_write_data_r0_64(struct arm_dpm *dpm,
547 uint32_t opcode, uint64_t data)
548 {
549 struct aarch64_common *a8 = dpm_to_a8(dpm);
550 uint32_t dscr = DSCR_ITE;
551 int retval;
552
553 retval = aarch64_write_dcc_64(&a8->armv8_common, data);
554 if (retval != ERROR_OK)
555 return retval;
556
557 retval = aarch64_exec_opcode(
558 a8->armv8_common.arm.target,
559 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0),
560 &dscr);
561 if (retval != ERROR_OK)
562 return retval;
563
564 /* then the opcode, taking data from R0 */
565 retval = aarch64_exec_opcode(
566 a8->armv8_common.arm.target,
567 opcode,
568 &dscr);
569
570 return retval;
571 }
572
573 static int aarch64_instr_cpsr_sync(struct arm_dpm *dpm)
574 {
575 struct target *target = dpm->arm->target;
576 uint32_t dscr = DSCR_ITE;
577
578 /* "Prefetch flush" after modifying execution status in CPSR */
579 return aarch64_exec_opcode(target,
580 DSB_SY,
581 &dscr);
582 }
583
584 static int aarch64_instr_read_data_dcc(struct arm_dpm *dpm,
585 uint32_t opcode, uint32_t *data)
586 {
587 struct aarch64_common *a8 = dpm_to_a8(dpm);
588 int retval;
589 uint32_t dscr = DSCR_ITE;
590
591 /* the opcode, writing data to DCC */
592 retval = aarch64_exec_opcode(
593 a8->armv8_common.arm.target,
594 opcode,
595 &dscr);
596 if (retval != ERROR_OK)
597 return retval;
598
599 return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
600 }
601
602 static int aarch64_instr_read_data_dcc_64(struct arm_dpm *dpm,
603 uint32_t opcode, uint64_t *data)
604 {
605 struct aarch64_common *a8 = dpm_to_a8(dpm);
606 int retval;
607 uint32_t dscr = DSCR_ITE;
608
609 /* the opcode, writing data to DCC */
610 retval = aarch64_exec_opcode(
611 a8->armv8_common.arm.target,
612 opcode,
613 &dscr);
614 if (retval != ERROR_OK)
615 return retval;
616
617 return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
618 }
619
620 static int aarch64_instr_read_data_r0(struct arm_dpm *dpm,
621 uint32_t opcode, uint32_t *data)
622 {
623 struct aarch64_common *a8 = dpm_to_a8(dpm);
624 uint32_t dscr = DSCR_ITE;
625 int retval;
626
627 /* the opcode, writing data to R0 */
628 retval = aarch64_exec_opcode(
629 a8->armv8_common.arm.target,
630 opcode,
631 &dscr);
632 if (retval != ERROR_OK)
633 return retval;
634
635 /* write R0 to DCC */
636 retval = aarch64_exec_opcode(
637 a8->armv8_common.arm.target,
638 ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 0), /* msr dbgdtr_el0, x0 */
639 &dscr);
640 if (retval != ERROR_OK)
641 return retval;
642
643 return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
644 }
645
646 static int aarch64_instr_read_data_r0_64(struct arm_dpm *dpm,
647 uint32_t opcode, uint64_t *data)
648 {
649 struct aarch64_common *a8 = dpm_to_a8(dpm);
650 uint32_t dscr = DSCR_ITE;
651 int retval;
652
653 /* the opcode, writing data to R0 */
654 retval = aarch64_exec_opcode(
655 a8->armv8_common.arm.target,
656 opcode,
657 &dscr);
658 if (retval != ERROR_OK)
659 return retval;
660
661 /* write R0 to DCC */
662 retval = aarch64_exec_opcode(
663 a8->armv8_common.arm.target,
664 ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), /* msr dbgdtr_el0, x0 */
665 &dscr);
666 if (retval != ERROR_OK)
667 return retval;
668
669 return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
670 }
671
/* Program one breakpoint (index 0..15) or watchpoint (index 16..31)
 * comparator: write its value register, then its control register.
 *
 * Each comparator occupies a 16-byte stride from the BVR/BCR (resp.
 * WVR/WCR) base in the external debug register map — hence the
 * "16 * index_t" offset below (cf. the commit subject: correct
 * breakpoint register offset). */
static int aarch64_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t vr = a8->armv8_common.debug_base;
	uint32_t cr = a8->armv8_common.debug_base;
	int retval;

	switch (index_t) {
	case 0 ... 15:	/* breakpoints */
		vr += CPUV8_DBG_BVR_BASE;
		cr += CPUV8_DBG_BCR_BASE;
		break;
	case 16 ... 31:	/* watchpoints */
		vr += CPUV8_DBG_WVR_BASE;
		cr += CPUV8_DBG_WCR_BASE;
		index_t -= 16;
		break;
	default:
		return ERROR_FAIL;
	}
	vr += 16 * index_t;
	cr += 16 * index_t;

	LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	/* value first, then control: the comparator only arms once the
	 * control register's enable bit is written */
	retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
707
708 static int aarch64_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
709 {
710 struct aarch64_common *a = dpm_to_a8(dpm);
711 uint32_t cr;
712
713 switch (index_t) {
714 case 0 ... 15:
715 cr = a->armv8_common.debug_base + CPUV8_DBG_BCR_BASE;
716 break;
717 case 16 ... 31:
718 cr = a->armv8_common.debug_base + CPUV8_DBG_WCR_BASE;
719 index_t -= 16;
720 break;
721 default:
722 return ERROR_FAIL;
723 }
724 cr += 16 * index_t;
725
726 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
727
728 /* clear control register */
729 return aarch64_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
730
731 }
732
/* Populate the arm_dpm operation table with the aarch64 implementations
 * above, then run the generic armv8 DPM setup and initialization.
 *
 * "debug" is the DIDR value stashed in the dpm for later inspection. */
static int aarch64_dpm_setup(struct aarch64_common *a8, uint32_t debug)
{
	struct arm_dpm *dpm = &a8->armv8_common.dpm;
	int retval;

	dpm->arm = &a8->armv8_common.arm;
	dpm->didr = debug;

	dpm->prepare = aarch64_dpm_prepare;
	dpm->finish = aarch64_dpm_finish;

	dpm->instr_execute = aarch64_instr_execute;
	dpm->instr_write_data_dcc = aarch64_instr_write_data_dcc;
	dpm->instr_write_data_dcc_64 = aarch64_instr_write_data_dcc_64;
	dpm->instr_write_data_r0 = aarch64_instr_write_data_r0;
	dpm->instr_write_data_r0_64 = aarch64_instr_write_data_r0_64;
	dpm->instr_cpsr_sync = aarch64_instr_cpsr_sync;

	dpm->instr_read_data_dcc = aarch64_instr_read_data_dcc;
	dpm->instr_read_data_dcc_64 = aarch64_instr_read_data_dcc_64;
	dpm->instr_read_data_r0 = aarch64_instr_read_data_r0;
	dpm->instr_read_data_r0_64 = aarch64_instr_read_data_r0_64;

	dpm->arm_reg_current = armv8_reg_current;

	dpm->bpwp_enable = aarch64_bpwp_enable;
	dpm->bpwp_disable = aarch64_bpwp_disable;

	/* initialize only if the generic setup succeeded */
	retval = armv8_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = armv8_dpm_initialize(dpm);

	return retval;
}
767 static struct target *get_aarch64(struct target *target, int32_t coreid)
768 {
769 struct target_list *head;
770 struct target *curr;
771
772 head = target->head;
773 while (head != (struct target_list *)NULL) {
774 curr = head->target;
775 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
776 return curr;
777 head = head->next;
778 }
779 return target;
780 }
781 static int aarch64_halt(struct target *target);
782
783 static int aarch64_halt_smp(struct target *target)
784 {
785 int retval = 0;
786 struct target_list *head;
787 struct target *curr;
788 head = target->head;
789 while (head != (struct target_list *)NULL) {
790 curr = head->target;
791 if ((curr != target) && (curr->state != TARGET_HALTED))
792 retval += aarch64_halt(curr);
793 head = head->next;
794 }
795 return retval;
796 }
797
798 static int update_halt_gdb(struct target *target)
799 {
800 int retval = 0;
801 if (target->gdb_service && target->gdb_service->core[0] == -1) {
802 target->gdb_service->target = target;
803 target->gdb_service->core[0] = target->coreid;
804 retval += aarch64_halt_smp(target);
805 }
806 return retval;
807 }
808
809 /*
810 * Cortex-A8 Run control
811 */
812
/* Poll the target: read DSCR, derive the run state, and on a new halt
 * run debug entry, halt SMP siblings and notify event callbacks. */
static int aarch64_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_aarch64(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for later inspection (e.g. debug_entry) */
	aarch64->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				/* debug-run halts report a different event */
				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
882
/* Halt the core through the Cross Trigger Interface: enable the CTI,
 * route channel 0 to the debug-request output, enable halting debug
 * (HDE) in DSCR, pulse the halt request, then wait for DSCR to report
 * the halted state. */
static int aarch64_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv8_common *armv8 = target_to_armv8(target);

	/* enable CTI*/
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_CTR, 1);
	if (retval != ERROR_OK)
		return retval;

	/* open channel gates 0 and 1 */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_GATE, 3);
	if (retval != ERROR_OK)
		return retval;

	/* output 0 (debug request) driven by channel 0 */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_OUTEN0, 1);
	if (retval != ERROR_OK)
		return retval;

	/* output 1 (restart) driven by channel 1 */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_OUTEN1, 2);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * add HDE in halting debug mode
	 */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr | DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* pulse channel 0: actual halt request */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_APPPULSE, 1);
	if (retval != ERROR_OK)
		return retval;

	/* acknowledge/clear the halt trigger */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_INACK, 1);
	if (retval != ERROR_OK)
		return retval;


	/* wait (up to 1s) for the core to report halted in DSCR */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCRV8_HALT_MASK) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
952
/* Prepare the core for resuming: fix up and write back the PC, restore
 * SCTLR and the register context.  Does NOT actually restart the core;
 * see aarch64_internal_restart for that.
 *
 * current != 0: resume at the current PC and report it via *address;
 * current == 0: resume at *address. */
static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_AARCH64:
			resume_pc &= 0xFFFFFFFFFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%16" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	/* NOTE(review): return value of dpmv8_modeswitch is ignored here -
	 * a failed mode switch would go unnoticed; confirm intentional */
	dpmv8_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}
#endif

	return retval;
}
1028
/* Restart a previously prepared core by pulsing CTI channel 1, then
 * wait for DSCR to confirm before marking the target running. */
static int aarch64_internal_restart(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t dscr;
	/*
	 * Restart core and wait for it to be started. Clear ITRen and sticky
	 * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* pulse CTI channel 1: restart request (cf. CTI_OUTEN1 in halt) */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_APPPULSE, 2);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		/* NOTE(review): this waits on DSCR_HDE, which aarch64_halt
		 * already set - presumably a restart-acknowledge bit was
		 * intended here; confirm against the DSCR bit definitions */
		if ((dscr & DSCR_HDE) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1078
1079 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
1080 {
1081 int retval = 0;
1082 struct target_list *head;
1083 struct target *curr;
1084 uint64_t address;
1085 head = target->head;
1086 while (head != (struct target_list *)NULL) {
1087 curr = head->target;
1088 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1089 /* resume current address , not in step mode */
1090 retval += aarch64_internal_restore(curr, 1, &address,
1091 handle_breakpoints, 0);
1092 retval += aarch64_internal_restart(curr);
1093 }
1094 head = head->next;
1095
1096 }
1097 return retval;
1098 }
1099
1100 static int aarch64_resume(struct target *target, int current,
1101 target_addr_t address, int handle_breakpoints, int debug_execution)
1102 {
1103 int retval = 0;
1104 uint64_t addr = address;
1105
1106 /* dummy resume for smp toggle in order to reduce gdb impact */
1107 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1108 /* simulate a start and halt of target */
1109 target->gdb_service->target = NULL;
1110 target->gdb_service->core[0] = target->gdb_service->core[1];
1111 /* fake resume at next poll we play the target core[1], see poll*/
1112 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1113 return 0;
1114 }
1115 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
1116 debug_execution);
1117 if (target->smp) {
1118 target->gdb_service->core[0] = -1;
1119 retval = aarch64_restore_smp(target, handle_breakpoints);
1120 if (retval != ERROR_OK)
1121 return retval;
1122 }
1123 aarch64_internal_restart(target);
1124
1125 if (!debug_execution) {
1126 target->state = TARGET_RUNNING;
1127 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1128 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
1129 } else {
1130 target->state = TARGET_DEBUG_RUNNING;
1131 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1132 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
1133 }
1134
1135 return ERROR_OK;
1136 }
1137
1138 static int aarch64_debug_entry(struct target *target)
1139 {
1140 int retval = ERROR_OK;
1141 struct aarch64_common *aarch64 = target_to_aarch64(target);
1142 struct armv8_common *armv8 = target_to_armv8(target);
1143
1144 LOG_DEBUG("dscr = 0x%08" PRIx32, aarch64->cpudbg_dscr);
1145
1146 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1147 * imprecise data aborts get discarded by issuing a Data
1148 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1149 */
1150
1151 /* make sure to clear all sticky errors */
1152 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1153 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1154 if (retval != ERROR_OK)
1155 return retval;
1156
1157 /* Examine debug reason */
1158 armv8_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);
1159
1160 /* save address of instruction that triggered the watchpoint? */
1161 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1162 uint32_t tmp;
1163 uint64_t wfar = 0;
1164
1165 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1166 armv8->debug_base + CPUV8_DBG_WFAR1,
1167 &tmp);
1168 if (retval != ERROR_OK)
1169 return retval;
1170 wfar = tmp;
1171 wfar = (wfar << 32);
1172 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1173 armv8->debug_base + CPUV8_DBG_WFAR0,
1174 &tmp);
1175 if (retval != ERROR_OK)
1176 return retval;
1177 wfar |= tmp;
1178 armv8_dpm_report_wfar(&armv8->dpm, wfar);
1179 }
1180
1181 retval = armv8_dpm_read_current_registers(&armv8->dpm);
1182
1183 if (armv8->post_debug_entry) {
1184 retval = armv8->post_debug_entry(target);
1185 if (retval != ERROR_OK)
1186 return retval;
1187 }
1188
1189 return retval;
1190 }
1191
1192 static int aarch64_post_debug_entry(struct target *target)
1193 {
1194 struct aarch64_common *aarch64 = target_to_aarch64(target);
1195 struct armv8_common *armv8 = &aarch64->armv8_common;
1196 int retval;
1197
1198 mem_ap_write_atomic_u32(armv8->debug_ap,
1199 armv8->debug_base + CPUV8_DBG_DRCR, 1<<2);
1200 switch (armv8->arm.core_mode) {
1201 case ARMV8_64_EL0T:
1202 case ARMV8_64_EL1T:
1203 case ARMV8_64_EL1H:
1204 retval = armv8->arm.mrs(target, 3, /*op 0*/
1205 0, 0, /* op1, op2 */
1206 1, 0, /* CRn, CRm */
1207 &aarch64->system_control_reg);
1208 if (retval != ERROR_OK)
1209 return retval;
1210 break;
1211 case ARMV8_64_EL2T:
1212 case ARMV8_64_EL2H:
1213 retval = armv8->arm.mrs(target, 3, /*op 0*/
1214 4, 0, /* op1, op2 */
1215 1, 0, /* CRn, CRm */
1216 &aarch64->system_control_reg);
1217 if (retval != ERROR_OK)
1218 return retval;
1219 break;
1220 case ARMV8_64_EL3H:
1221 case ARMV8_64_EL3T:
1222 retval = armv8->arm.mrs(target, 3, /*op 0*/
1223 6, 0, /* op1, op2 */
1224 1, 0, /* CRn, CRm */
1225 &aarch64->system_control_reg);
1226 if (retval != ERROR_OK)
1227 return retval;
1228 break;
1229 default:
1230 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
1231 }
1232 LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
1233 aarch64->system_control_reg_curr = aarch64->system_control_reg;
1234
1235 if (armv8->armv8_mmu.armv8_cache.ctype == -1)
1236 armv8_identify_cache(target);
1237
1238 armv8->armv8_mmu.mmu_enabled =
1239 (aarch64->system_control_reg & 0x1U) ? 1 : 0;
1240 armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
1241 (aarch64->system_control_reg & 0x4U) ? 1 : 0;
1242 armv8->armv8_mmu.armv8_cache.i_cache_enabled =
1243 (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
1244 aarch64->curr_mode = armv8->arm.core_mode;
1245 return ERROR_OK;
1246 }
1247
1248 static int aarch64_step(struct target *target, int current, target_addr_t address,
1249 int handle_breakpoints)
1250 {
1251 struct armv8_common *armv8 = target_to_armv8(target);
1252 int retval;
1253 uint32_t tmp;
1254
1255 if (target->state != TARGET_HALTED) {
1256 LOG_WARNING("target not halted");
1257 return ERROR_TARGET_NOT_HALTED;
1258 }
1259
1260 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1261 armv8->debug_base + CPUV8_DBG_EDECR, &tmp);
1262 if (retval != ERROR_OK)
1263 return retval;
1264
1265 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1266 armv8->debug_base + CPUV8_DBG_EDECR, (tmp|0x4));
1267 if (retval != ERROR_OK)
1268 return retval;
1269
1270 target->debug_reason = DBG_REASON_SINGLESTEP;
1271 retval = aarch64_resume(target, 1, address, 0, 0);
1272 if (retval != ERROR_OK)
1273 return retval;
1274
1275 long long then = timeval_ms();
1276 while (target->state != TARGET_HALTED) {
1277 mem_ap_read_atomic_u32(armv8->debug_ap,
1278 armv8->debug_base + CPUV8_DBG_EDESR, &tmp);
1279 LOG_DEBUG("DESR = %#x", tmp);
1280 retval = aarch64_poll(target);
1281 if (retval != ERROR_OK)
1282 return retval;
1283 if (timeval_ms() > then + 1000) {
1284 LOG_ERROR("timeout waiting for target halt");
1285 return ERROR_FAIL;
1286 }
1287 }
1288
1289 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1290 armv8->debug_base + CPUV8_DBG_EDECR, (tmp&(~0x4)));
1291 if (retval != ERROR_OK)
1292 return retval;
1293
1294 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1295 if (target->state == TARGET_HALTED)
1296 LOG_DEBUG("target stepped");
1297
1298 return ERROR_OK;
1299 }
1300
1301 static int aarch64_restore_context(struct target *target, bool bpwp)
1302 {
1303 struct armv8_common *armv8 = target_to_armv8(target);
1304
1305 LOG_DEBUG(" ");
1306
1307 if (armv8->pre_restore_context)
1308 armv8->pre_restore_context(target);
1309
1310 return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1311
1312 }
1313
/*
 * AArch64 breakpoint and watchpoint functions
 */
1317
1318 /* Setup hardware Breakpoint Register Pair */
1319 static int aarch64_set_breakpoint(struct target *target,
1320 struct breakpoint *breakpoint, uint8_t matchmode)
1321 {
1322 int retval;
1323 int brp_i = 0;
1324 uint32_t control;
1325 uint8_t byte_addr_select = 0x0F;
1326 struct aarch64_common *aarch64 = target_to_aarch64(target);
1327 struct armv8_common *armv8 = &aarch64->armv8_common;
1328 struct aarch64_brp *brp_list = aarch64->brp_list;
1329 uint32_t dscr;
1330
1331 if (breakpoint->set) {
1332 LOG_WARNING("breakpoint already set");
1333 return ERROR_OK;
1334 }
1335
1336 if (breakpoint->type == BKPT_HARD) {
1337 int64_t bpt_value;
1338 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
1339 brp_i++;
1340 if (brp_i >= aarch64->brp_num) {
1341 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1342 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1343 }
1344 breakpoint->set = brp_i + 1;
1345 if (breakpoint->length == 2)
1346 byte_addr_select = (3 << (breakpoint->address & 0x02));
1347 control = ((matchmode & 0x7) << 20)
1348 | (1 << 13)
1349 | (byte_addr_select << 5)
1350 | (3 << 1) | 1;
1351 brp_list[brp_i].used = 1;
1352 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1353 brp_list[brp_i].control = control;
1354 bpt_value = brp_list[brp_i].value;
1355
1356 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1357 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1358 (uint32_t)(bpt_value & 0xFFFFFFFF));
1359 if (retval != ERROR_OK)
1360 return retval;
1361 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1362 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1363 (uint32_t)(bpt_value >> 32));
1364 if (retval != ERROR_OK)
1365 return retval;
1366
1367 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1368 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1369 brp_list[brp_i].control);
1370 if (retval != ERROR_OK)
1371 return retval;
1372 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1373 brp_list[brp_i].control,
1374 brp_list[brp_i].value);
1375
1376 } else if (breakpoint->type == BKPT_SOFT) {
1377 uint8_t code[4];
1378 buf_set_u32(code, 0, 32, ARMV8_BKPT(0x11));
1379 retval = target_read_memory(target,
1380 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1381 breakpoint->length, 1,
1382 breakpoint->orig_instr);
1383 if (retval != ERROR_OK)
1384 return retval;
1385 retval = target_write_memory(target,
1386 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1387 breakpoint->length, 1, code);
1388 if (retval != ERROR_OK)
1389 return retval;
1390 breakpoint->set = 0x11; /* Any nice value but 0 */
1391 }
1392
1393 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1394 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1395 /* Ensure that halting debug mode is enable */
1396 dscr = dscr | DSCR_HDE;
1397 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1398 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1399 if (retval != ERROR_OK) {
1400 LOG_DEBUG("Failed to set DSCR.HDE");
1401 return retval;
1402 }
1403
1404 return ERROR_OK;
1405 }
1406
1407 static int aarch64_set_context_breakpoint(struct target *target,
1408 struct breakpoint *breakpoint, uint8_t matchmode)
1409 {
1410 int retval = ERROR_FAIL;
1411 int brp_i = 0;
1412 uint32_t control;
1413 uint8_t byte_addr_select = 0x0F;
1414 struct aarch64_common *aarch64 = target_to_aarch64(target);
1415 struct armv8_common *armv8 = &aarch64->armv8_common;
1416 struct aarch64_brp *brp_list = aarch64->brp_list;
1417
1418 if (breakpoint->set) {
1419 LOG_WARNING("breakpoint already set");
1420 return retval;
1421 }
1422 /*check available context BRPs*/
1423 while ((brp_list[brp_i].used ||
1424 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
1425 brp_i++;
1426
1427 if (brp_i >= aarch64->brp_num) {
1428 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1429 return ERROR_FAIL;
1430 }
1431
1432 breakpoint->set = brp_i + 1;
1433 control = ((matchmode & 0x7) << 20)
1434 | (1 << 13)
1435 | (byte_addr_select << 5)
1436 | (3 << 1) | 1;
1437 brp_list[brp_i].used = 1;
1438 brp_list[brp_i].value = (breakpoint->asid);
1439 brp_list[brp_i].control = control;
1440 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1441 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1442 brp_list[brp_i].value);
1443 if (retval != ERROR_OK)
1444 return retval;
1445 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1446 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1447 brp_list[brp_i].control);
1448 if (retval != ERROR_OK)
1449 return retval;
1450 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1451 brp_list[brp_i].control,
1452 brp_list[brp_i].value);
1453 return ERROR_OK;
1454
1455 }
1456
1457 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1458 {
1459 int retval = ERROR_FAIL;
1460 int brp_1 = 0; /* holds the contextID pair */
1461 int brp_2 = 0; /* holds the IVA pair */
1462 uint32_t control_CTX, control_IVA;
1463 uint8_t CTX_byte_addr_select = 0x0F;
1464 uint8_t IVA_byte_addr_select = 0x0F;
1465 uint8_t CTX_machmode = 0x03;
1466 uint8_t IVA_machmode = 0x01;
1467 struct aarch64_common *aarch64 = target_to_aarch64(target);
1468 struct armv8_common *armv8 = &aarch64->armv8_common;
1469 struct aarch64_brp *brp_list = aarch64->brp_list;
1470
1471 if (breakpoint->set) {
1472 LOG_WARNING("breakpoint already set");
1473 return retval;
1474 }
1475 /*check available context BRPs*/
1476 while ((brp_list[brp_1].used ||
1477 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1478 brp_1++;
1479
1480 printf("brp(CTX) found num: %d\n", brp_1);
1481 if (brp_1 >= aarch64->brp_num) {
1482 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1483 return ERROR_FAIL;
1484 }
1485
1486 while ((brp_list[brp_2].used ||
1487 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1488 brp_2++;
1489
1490 printf("brp(IVA) found num: %d\n", brp_2);
1491 if (brp_2 >= aarch64->brp_num) {
1492 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1493 return ERROR_FAIL;
1494 }
1495
1496 breakpoint->set = brp_1 + 1;
1497 breakpoint->linked_BRP = brp_2;
1498 control_CTX = ((CTX_machmode & 0x7) << 20)
1499 | (brp_2 << 16)
1500 | (0 << 14)
1501 | (CTX_byte_addr_select << 5)
1502 | (3 << 1) | 1;
1503 brp_list[brp_1].used = 1;
1504 brp_list[brp_1].value = (breakpoint->asid);
1505 brp_list[brp_1].control = control_CTX;
1506 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1507 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1508 brp_list[brp_1].value);
1509 if (retval != ERROR_OK)
1510 return retval;
1511 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1512 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1513 brp_list[brp_1].control);
1514 if (retval != ERROR_OK)
1515 return retval;
1516
1517 control_IVA = ((IVA_machmode & 0x7) << 20)
1518 | (brp_1 << 16)
1519 | (1 << 13)
1520 | (IVA_byte_addr_select << 5)
1521 | (3 << 1) | 1;
1522 brp_list[brp_2].used = 1;
1523 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1524 brp_list[brp_2].control = control_IVA;
1525 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1526 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1527 brp_list[brp_2].value & 0xFFFFFFFF);
1528 if (retval != ERROR_OK)
1529 return retval;
1530 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1531 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1532 brp_list[brp_2].value >> 32);
1533 if (retval != ERROR_OK)
1534 return retval;
1535 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1536 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1537 brp_list[brp_2].control);
1538 if (retval != ERROR_OK)
1539 return retval;
1540
1541 return ERROR_OK;
1542 }
1543
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	/* Remove a previously programmed breakpoint: release the BRP(s) for a
	 * hardware breakpoint (hybrid breakpoints release the linked pair as
	 * well), or restore the saved original instruction for a soft one. */
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* both address and asid non-zero identifies a hybrid
		 * (linked context + address) breakpoint */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* disabling via BCR is sufficient to deactivate the pair;
			 * NOTE(review): BVR is left holding its old value here —
			 * presumably harmless once the pair is disabled, confirm */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain address-match breakpoint in a single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			/* NOTE(review): this clears only the low 32 bits of the
			 * 64-bit BVR (no +4 write for the high word) — confirm
			 * whether the high half should be cleared as well */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1636
1637 static int aarch64_add_breakpoint(struct target *target,
1638 struct breakpoint *breakpoint)
1639 {
1640 struct aarch64_common *aarch64 = target_to_aarch64(target);
1641
1642 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1643 LOG_INFO("no hardware breakpoint available");
1644 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1645 }
1646
1647 if (breakpoint->type == BKPT_HARD)
1648 aarch64->brp_num_available--;
1649
1650 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1651 }
1652
1653 static int aarch64_add_context_breakpoint(struct target *target,
1654 struct breakpoint *breakpoint)
1655 {
1656 struct aarch64_common *aarch64 = target_to_aarch64(target);
1657
1658 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1659 LOG_INFO("no hardware breakpoint available");
1660 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1661 }
1662
1663 if (breakpoint->type == BKPT_HARD)
1664 aarch64->brp_num_available--;
1665
1666 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1667 }
1668
1669 static int aarch64_add_hybrid_breakpoint(struct target *target,
1670 struct breakpoint *breakpoint)
1671 {
1672 struct aarch64_common *aarch64 = target_to_aarch64(target);
1673
1674 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1675 LOG_INFO("no hardware breakpoint available");
1676 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1677 }
1678
1679 if (breakpoint->type == BKPT_HARD)
1680 aarch64->brp_num_available--;
1681
1682 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1683 }
1684
1685
1686 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1687 {
1688 struct aarch64_common *aarch64 = target_to_aarch64(target);
1689
1690 #if 0
1691 /* It is perfectly possible to remove breakpoints while the target is running */
1692 if (target->state != TARGET_HALTED) {
1693 LOG_WARNING("target not halted");
1694 return ERROR_TARGET_NOT_HALTED;
1695 }
1696 #endif
1697
1698 if (breakpoint->set) {
1699 aarch64_unset_breakpoint(target, breakpoint);
1700 if (breakpoint->type == BKPT_HARD)
1701 aarch64->brp_num_available++;
1702 }
1703
1704 return ERROR_OK;
1705 }
1706
/*
 * AArch64 reset functions
 */
1710
1711 static int aarch64_assert_reset(struct target *target)
1712 {
1713 struct armv8_common *armv8 = target_to_armv8(target);
1714
1715 LOG_DEBUG(" ");
1716
1717 /* FIXME when halt is requested, make it work somehow... */
1718
1719 /* Issue some kind of warm reset. */
1720 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1721 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1722 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1723 /* REVISIT handle "pulls" cases, if there's
1724 * hardware that needs them to work.
1725 */
1726 jtag_add_reset(0, 1);
1727 } else {
1728 LOG_ERROR("%s: how to reset?", target_name(target));
1729 return ERROR_FAIL;
1730 }
1731
1732 /* registers are now invalid */
1733 register_cache_invalidate(armv8->arm.core_cache);
1734
1735 target->state = TARGET_RESET;
1736
1737 return ERROR_OK;
1738 }
1739
1740 static int aarch64_deassert_reset(struct target *target)
1741 {
1742 int retval;
1743
1744 LOG_DEBUG(" ");
1745
1746 /* be certain SRST is off */
1747 jtag_add_reset(0, 0);
1748
1749 retval = aarch64_poll(target);
1750 if (retval != ERROR_OK)
1751 return retval;
1752
1753 if (target->reset_halt) {
1754 if (target->state != TARGET_HALTED) {
1755 LOG_WARNING("%s: ran after reset and before halt ...",
1756 target_name(target));
1757 retval = target_halt(target);
1758 if (retval != ERROR_OK)
1759 return retval;
1760 }
1761 }
1762
1763 return ERROR_OK;
1764 }
1765
1766 static int aarch64_write_apb_ap_memory(struct target *target,
1767 uint64_t address, uint32_t size,
1768 uint32_t count, const uint8_t *buffer)
1769 {
1770 /* write memory through APB-AP */
1771 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1772 struct armv8_common *armv8 = target_to_armv8(target);
1773 struct arm *arm = &armv8->arm;
1774 int total_bytes = count * size;
1775 int total_u32;
1776 int start_byte = address & 0x3;
1777 int end_byte = (address + total_bytes) & 0x3;
1778 struct reg *reg;
1779 uint32_t dscr;
1780 uint8_t *tmp_buff = NULL;
1781
1782 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count%" PRIu32,
1783 address, size, count);
1784 if (target->state != TARGET_HALTED) {
1785 LOG_WARNING("target not halted");
1786 return ERROR_TARGET_NOT_HALTED;
1787 }
1788
1789 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1790
1791 /* Mark register R0 as dirty, as it will be used
1792 * for transferring the data.
1793 * It will be restored automatically when exiting
1794 * debug mode
1795 */
1796 reg = armv8_reg_current(arm, 1);
1797 reg->dirty = true;
1798
1799 reg = armv8_reg_current(arm, 0);
1800 reg->dirty = true;
1801
1802 /* clear any abort */
1803 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1804 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1805 if (retval != ERROR_OK)
1806 return retval;
1807
1808
1809 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1810
1811 /* The algorithm only copies 32 bit words, so the buffer
1812 * should be expanded to include the words at either end.
1813 * The first and last words will be read first to avoid
1814 * corruption if needed.
1815 */
1816 tmp_buff = malloc(total_u32 * 4);
1817
1818 if ((start_byte != 0) && (total_u32 > 1)) {
1819 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1820 * the other bytes in the word.
1821 */
1822 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1823 if (retval != ERROR_OK)
1824 goto error_free_buff_w;
1825 }
1826
1827 /* If end of write is not aligned, or the write is less than 4 bytes */
1828 if ((end_byte != 0) ||
1829 ((total_u32 == 1) && (total_bytes != 4))) {
1830
1831 /* Read the last word to avoid corruption during 32 bit write */
1832 int mem_offset = (total_u32-1) * 4;
1833 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1834 if (retval != ERROR_OK)
1835 goto error_free_buff_w;
1836 }
1837
1838 /* Copy the write buffer over the top of the temporary buffer */
1839 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1840
1841 /* We now have a 32 bit aligned buffer that can be written */
1842
1843 /* Read DSCR */
1844 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1845 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1846 if (retval != ERROR_OK)
1847 goto error_free_buff_w;
1848
1849 /* Set Normal access mode */
1850 dscr = (dscr & ~DSCR_MA);
1851 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1852 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1853
1854 if (arm->core_state == ARM_STATE_AARCH64) {
1855 /* Write X0 with value 'address' using write procedure */
1856 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1857 retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
1858 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1859 retval += aarch64_exec_opcode(target,
1860 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1861 } else {
1862 /* Write R0 with value 'address' using write procedure */
1863 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1864 retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
1865 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1866 retval += aarch64_exec_opcode(target,
1867 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
1868
1869 }
1870 /* Step 1.d - Change DCC to memory mode */
1871 dscr = dscr | DSCR_MA;
1872 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1873 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1874 if (retval != ERROR_OK)
1875 goto error_unset_dtr_w;
1876
1877
1878 /* Step 2.a - Do the write */
1879 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1880 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1881 if (retval != ERROR_OK)
1882 goto error_unset_dtr_w;
1883
1884 /* Step 3.a - Switch DTR mode back to Normal mode */
1885 dscr = (dscr & ~DSCR_MA);
1886 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1887 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1888 if (retval != ERROR_OK)
1889 goto error_unset_dtr_w;
1890
1891 /* Check for sticky abort flags in the DSCR */
1892 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1893 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1894 if (retval != ERROR_OK)
1895 goto error_free_buff_w;
1896 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1897 /* Abort occurred - clear it and exit */
1898 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1899 mem_ap_write_atomic_u32(armv8->debug_ap,
1900 armv8->debug_base + CPUV8_DBG_DRCR, 1<<2);
1901 goto error_free_buff_w;
1902 }
1903
1904 /* Done */
1905 free(tmp_buff);
1906 return ERROR_OK;
1907
1908 error_unset_dtr_w:
1909 /* Unset DTR mode */
1910 mem_ap_read_atomic_u32(armv8->debug_ap,
1911 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1912 dscr = (dscr & ~DSCR_MA);
1913 mem_ap_write_atomic_u32(armv8->debug_ap,
1914 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1915 error_free_buff_w:
1916 LOG_ERROR("error");
1917 free(tmp_buff);
1918 return ERROR_FAIL;
1919 }
1920
1921 static int aarch64_read_apb_ap_memory(struct target *target,
1922 target_addr_t address, uint32_t size,
1923 uint32_t count, uint8_t *buffer)
1924 {
1925 /* read memory through APB-AP */
1926 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1927 struct armv8_common *armv8 = target_to_armv8(target);
1928 struct arm *arm = &armv8->arm;
1929 int total_bytes = count * size;
1930 int total_u32;
1931 int start_byte = address & 0x3;
1932 int end_byte = (address + total_bytes) & 0x3;
1933 struct reg *reg;
1934 uint32_t dscr;
1935 uint8_t *tmp_buff = NULL;
1936 uint8_t *u8buf_ptr;
1937 uint32_t value;
1938
1939 LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count%" PRIu32,
1940 address, size, count);
1941 if (target->state != TARGET_HALTED) {
1942 LOG_WARNING("target not halted");
1943 return ERROR_TARGET_NOT_HALTED;
1944 }
1945
1946 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1947 /* Mark register X0, X1 as dirty, as it will be used
1948 * for transferring the data.
1949 * It will be restored automatically when exiting
1950 * debug mode
1951 */
1952 reg = armv8_reg_current(arm, 1);
1953 reg->dirty = true;
1954
1955 reg = armv8_reg_current(arm, 0);
1956 reg->dirty = true;
1957
1958 /* clear any abort */
1959 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1960 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1961 if (retval != ERROR_OK)
1962 goto error_free_buff_r;
1963
1964 /* Read DSCR */
1965 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1966 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1967
1968 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1969
1970 /* Set Normal access mode */
1971 dscr = (dscr & ~DSCR_MA);
1972 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1973 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1974
1975 if (arm->core_state == ARM_STATE_AARCH64) {
1976 /* Write X0 with value 'address' using write procedure */
1977 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1978 retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
1979 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1980 retval += aarch64_exec_opcode(target, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1981 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1982 retval += aarch64_exec_opcode(target, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1983 /* Step 1.e - Change DCC to memory mode */
1984 dscr = dscr | DSCR_MA;
1985 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1986 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1987 /* Step 1.f - read DBGDTRTX and discard the value */
1988 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1989 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1990 } else {
1991 /* Write R0 with value 'address' using write procedure */
1992 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
1993 retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
1994 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1995 retval += aarch64_exec_opcode(target,
1996 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
1997 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1998 retval += aarch64_exec_opcode(target,
1999 T32_FMTITR(ARMV4_5_MCR(14, 0, 0, 0, 5, 0)), &dscr);
2000 /* Step 1.e - Change DCC to memory mode */
2001 dscr = dscr | DSCR_MA;
2002 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
2003 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2004 /* Step 1.f - read DBGDTRTX and discard the value */
2005 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
2006 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2007
2008 }
2009 if (retval != ERROR_OK)
2010 goto error_unset_dtr_r;
2011
2012 /* Optimize the read as much as we can, either way we read in a single pass */
2013 if ((start_byte) || (end_byte)) {
2014 /* The algorithm only copies 32 bit words, so the buffer
2015 * should be expanded to include the words at either end.
2016 * The first and last words will be read into a temp buffer
2017 * to avoid corruption
2018 */
2019 tmp_buff = malloc(total_u32 * 4);
2020 if (!tmp_buff)
2021 goto error_unset_dtr_r;
2022
2023 /* use the tmp buffer to read the entire data */
2024 u8buf_ptr = tmp_buff;
2025 } else
2026 /* address and read length are aligned so read directly into the passed buffer */
2027 u8buf_ptr = buffer;
2028
2029 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
2030 * Abort flags are sticky, so can be read at end of transactions
2031 *
2032 * This data is read in aligned to 32 bit boundary.
2033 */
2034
2035 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
2036 * increments X0 by 4. */
2037 retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
2038 armv8->debug_base + CPUV8_DBG_DTRTX);
2039 if (retval != ERROR_OK)
2040 goto error_unset_dtr_r;
2041
2042 /* Step 3.a - set DTR access mode back to Normal mode */
2043 dscr = (dscr & ~DSCR_MA);
2044 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2045 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2046 if (retval != ERROR_OK)
2047 goto error_free_buff_r;
2048
2049 /* Step 3.b - read DBGDTRTX for the final value */
2050 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2051 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2052 memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);
2053
2054 /* Check for sticky abort flags in the DSCR */
2055 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2056 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2057 if (retval != ERROR_OK)
2058 goto error_free_buff_r;
2059 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2060 /* Abort occurred - clear it and exit */
2061 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2062 mem_ap_write_atomic_u32(armv8->debug_ap,
2063 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
2064 goto error_free_buff_r;
2065 }
2066
2067 /* check if we need to copy aligned data by applying any shift necessary */
2068 if (tmp_buff) {
2069 memcpy(buffer, tmp_buff + start_byte, total_bytes);
2070 free(tmp_buff);
2071 }
2072
2073 /* Done */
2074 return ERROR_OK;
2075
2076 error_unset_dtr_r:
2077 /* Unset DTR mode */
2078 mem_ap_read_atomic_u32(armv8->debug_ap,
2079 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2080 dscr = (dscr & ~DSCR_MA);
2081 mem_ap_write_atomic_u32(armv8->debug_ap,
2082 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2083 error_free_buff_r:
2084 LOG_ERROR("error");
2085 free(tmp_buff);
2086 return ERROR_FAIL;
2087 }
2088
2089 static int aarch64_read_phys_memory(struct target *target,
2090 target_addr_t address, uint32_t size,
2091 uint32_t count, uint8_t *buffer)
2092 {
2093 struct armv8_common *armv8 = target_to_armv8(target);
2094 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2095 struct adiv5_dap *swjdp = armv8->arm.dap;
2096 uint8_t apsel = swjdp->apsel;
2097 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
2098 address, size, count);
2099
2100 if (count && buffer) {
2101
2102 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2103
2104 /* read memory through AHB-AP */
2105 retval = mem_ap_read_buf(armv8->memory_ap, buffer, size, count, address);
2106 } else {
2107 /* read memory through APB-AP */
2108 retval = aarch64_mmu_modify(target, 0);
2109 if (retval != ERROR_OK)
2110 return retval;
2111 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
2112 }
2113 }
2114 return retval;
2115 }
2116
2117 static int aarch64_read_memory(struct target *target, target_addr_t address,
2118 uint32_t size, uint32_t count, uint8_t *buffer)
2119 {
2120 int mmu_enabled = 0;
2121 target_addr_t virt, phys;
2122 int retval;
2123 struct armv8_common *armv8 = target_to_armv8(target);
2124 struct adiv5_dap *swjdp = armv8->arm.dap;
2125 uint8_t apsel = swjdp->apsel;
2126
2127 /* aarch64 handles unaligned memory access */
2128 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
2129 size, count);
2130
2131 /* determine if MMU was enabled on target stop */
2132 if (!armv8->is_armv7r) {
2133 retval = aarch64_mmu(target, &mmu_enabled);
2134 if (retval != ERROR_OK)
2135 return retval;
2136 }
2137
2138 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2139 if (mmu_enabled) {
2140 virt = address;
2141 retval = aarch64_virt2phys(target, virt, &phys);
2142 if (retval != ERROR_OK)
2143 return retval;
2144
2145 LOG_DEBUG("Reading at virtual address. Translating v:0x%" TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR,
2146 virt, phys);
2147 address = phys;
2148 }
2149 retval = aarch64_read_phys_memory(target, address, size, count,
2150 buffer);
2151 } else {
2152 if (mmu_enabled) {
2153 retval = aarch64_check_address(target, address);
2154 if (retval != ERROR_OK)
2155 return retval;
2156 /* enable MMU as we could have disabled it for phys
2157 access */
2158 retval = aarch64_mmu_modify(target, 1);
2159 if (retval != ERROR_OK)
2160 return retval;
2161 }
2162 retval = aarch64_read_apb_ap_memory(target, address, size,
2163 count, buffer);
2164 }
2165 return retval;
2166 }
2167
/**
 * Write target memory at a physical address.
 *
 * Uses the AHB memory AP when available and selected; otherwise writes
 * through the CPU via the APB-AP with the MMU disabled.  After a
 * successful AHB write on a halted target, the I- and D-caches are
 * invalidated for the written range so the core does not execute or read
 * stale lines.
 *
 * NOTE(review): the APB-AP branch returns directly, so it deliberately(?)
 * skips the cache-maintenance block below - confirm this is intended
 * (CPU-side writes may be coherent by construction).
 */
static int aarch64_write_phys_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct adiv5_dap *swjdp = armv8->arm.dap;
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	uint8_t apsel = swjdp->apsel;

	LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
			size, count);

	if (count && buffer) {

		if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {

			/* write memory through AHB-AP */
			retval = mem_ap_write_buf(armv8->memory_ap, buffer, size, count, address);
		} else {

			/* write memory through APB-AP */
			if (!armv8->is_armv7r) {
				/* MMU off so the CPU treats the address as physical */
				retval = aarch64_mmu_modify(target, 0);
				if (retval != ERROR_OK)
					return retval;
			}
			return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
		}
	}


	/* REVISIT this op is generic ARMv7-A/R stuff */
	if (retval == ERROR_OK && target->state == TARGET_HALTED) {
		struct arm_dpm *dpm = armv8->arm.dpm;

		retval = dpm->prepare(dpm);
		if (retval != ERROR_OK)
			return retval;

		/* The Cache handling will NOT work with MMU active, the
		 * wrong addresses will be invalidated!
		 *
		 * For both ICache and DCache, walk all cache lines in the
		 * address range. Cortex-A8 has fixed 64 byte line length.
		 *
		 * REVISIT per ARMv7, these may trigger watchpoints ...
		 */

		/* invalidate I-Cache */
		if (armv8->armv8_mmu.armv8_cache.i_cache_enabled) {
			/* ICIMVAU - Invalidate Cache single entry
			 * with MVA to PoU
			 * MCR p15, 0, r0, c7, c5, 1
			 */
			/* NOTE(review): cacheline is uint32_t, so addresses above
			 * 4GiB would be truncated here - confirm for 64-bit targets.
			 * The 64-byte line length is also assumed, not probed. */
			for (uint32_t cacheline = address;
				cacheline < address + size * count;
				cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
						cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* invalidate D-Cache */
		if (armv8->armv8_mmu.armv8_cache.d_u_cache_enabled) {
			/* DCIMVAC - Invalidate data Cache line
			 * with MVA to PoC
			 * MCR p15, 0, r0, c7, c6, 1
			 */
			for (uint32_t cacheline = address;
				cacheline < address + size * count;
				cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
						cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* (void) */ dpm->finish(dpm);
	}

	return retval;
}
2255
2256 static int aarch64_write_memory(struct target *target, target_addr_t address,
2257 uint32_t size, uint32_t count, const uint8_t *buffer)
2258 {
2259 int mmu_enabled = 0;
2260 target_addr_t virt, phys;
2261 int retval;
2262 struct armv8_common *armv8 = target_to_armv8(target);
2263 struct adiv5_dap *swjdp = armv8->arm.dap;
2264 uint8_t apsel = swjdp->apsel;
2265
2266 /* aarch64 handles unaligned memory access */
2267 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
2268 "; count %" PRId32, address, size, count);
2269
2270 /* determine if MMU was enabled on target stop */
2271 if (!armv8->is_armv7r) {
2272 retval = aarch64_mmu(target, &mmu_enabled);
2273 if (retval != ERROR_OK)
2274 return retval;
2275 }
2276
2277 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2278 LOG_DEBUG("Writing memory to address 0x%" TARGET_PRIxADDR "; size %"
2279 PRId32 "; count %" PRId32, address, size, count);
2280 if (mmu_enabled) {
2281 virt = address;
2282 retval = aarch64_virt2phys(target, virt, &phys);
2283 if (retval != ERROR_OK)
2284 return retval;
2285
2286 LOG_DEBUG("Writing to virtual address. Translating v:0x%"
2287 TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR, virt, phys);
2288 address = phys;
2289 }
2290 retval = aarch64_write_phys_memory(target, address, size,
2291 count, buffer);
2292 } else {
2293 if (mmu_enabled) {
2294 retval = aarch64_check_address(target, address);
2295 if (retval != ERROR_OK)
2296 return retval;
2297 /* enable MMU as we could have disabled it for phys access */
2298 retval = aarch64_mmu_modify(target, 1);
2299 if (retval != ERROR_OK)
2300 return retval;
2301 }
2302 retval = aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2303 }
2304 return retval;
2305 }
2306
2307 static int aarch64_handle_target_request(void *priv)
2308 {
2309 struct target *target = priv;
2310 struct armv8_common *armv8 = target_to_armv8(target);
2311 int retval;
2312
2313 if (!target_was_examined(target))
2314 return ERROR_OK;
2315 if (!target->dbg_msg_enabled)
2316 return ERROR_OK;
2317
2318 if (target->state == TARGET_RUNNING) {
2319 uint32_t request;
2320 uint32_t dscr;
2321 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2322 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2323
2324 /* check if we have data */
2325 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2326 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2327 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2328 if (retval == ERROR_OK) {
2329 target_request(target, request);
2330 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2331 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2332 }
2333 }
2334 }
2335
2336 return ERROR_OK;
2337 }
2338
2339 static int aarch64_examine_first(struct target *target)
2340 {
2341 struct aarch64_common *aarch64 = target_to_aarch64(target);
2342 struct armv8_common *armv8 = &aarch64->armv8_common;
2343 struct adiv5_dap *swjdp = armv8->arm.dap;
2344 int retval = ERROR_OK;
2345 uint32_t pfr, debug, ctypr, ttypr, cpuid;
2346 int i;
2347
2348 /* We do one extra read to ensure DAP is configured,
2349 * we call ahbap_debugport_init(swjdp) instead
2350 */
2351 retval = dap_dp_init(swjdp);
2352 if (retval != ERROR_OK)
2353 return retval;
2354
2355 /* Search for the APB-AB - it is needed for access to debug registers */
2356 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2357 if (retval != ERROR_OK) {
2358 LOG_ERROR("Could not find APB-AP for debug access");
2359 return retval;
2360 }
2361
2362 retval = mem_ap_init(armv8->debug_ap);
2363 if (retval != ERROR_OK) {
2364 LOG_ERROR("Could not initialize the APB-AP");
2365 return retval;
2366 }
2367
2368 armv8->debug_ap->memaccess_tck = 80;
2369
2370 /* Search for the AHB-AB */
2371 armv8->memory_ap_available = false;
2372 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv8->memory_ap);
2373 if (retval == ERROR_OK) {
2374 retval = mem_ap_init(armv8->memory_ap);
2375 if (retval == ERROR_OK)
2376 armv8->memory_ap_available = true;
2377 }
2378 if (retval != ERROR_OK) {
2379 /* AHB-AP not found or unavailable - use the CPU */
2380 LOG_DEBUG("No AHB-AP available for memory access");
2381 }
2382
2383
2384 if (!target->dbgbase_set) {
2385 uint32_t dbgbase;
2386 /* Get ROM Table base */
2387 uint32_t apid;
2388 int32_t coreidx = target->coreid;
2389 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2390 if (retval != ERROR_OK)
2391 return retval;
2392 /* Lookup 0x15 -- Processor DAP */
2393 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2394 &armv8->debug_base, &coreidx);
2395 if (retval != ERROR_OK)
2396 return retval;
2397 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2398 coreidx, armv8->debug_base);
2399 } else
2400 armv8->debug_base = target->dbgbase;
2401
2402 LOG_DEBUG("Target ctibase is 0x%x", target->ctibase);
2403 if (target->ctibase == 0)
2404 armv8->cti_base = target->ctibase = armv8->debug_base + 0x1000;
2405 else
2406 armv8->cti_base = target->ctibase;
2407
2408 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2409 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
2410 if (retval != ERROR_OK) {
2411 LOG_DEBUG("Examine %s failed", "oslock");
2412 return retval;
2413 }
2414
2415 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2416 armv8->debug_base + 0x88, &cpuid);
2417 LOG_DEBUG("0x88 = %x", cpuid);
2418
2419 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2420 armv8->debug_base + 0x314, &cpuid);
2421 LOG_DEBUG("0x314 = %x", cpuid);
2422
2423 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2424 armv8->debug_base + 0x310, &cpuid);
2425 LOG_DEBUG("0x310 = %x", cpuid);
2426 if (retval != ERROR_OK)
2427 return retval;
2428
2429 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2430 armv8->debug_base + CPUDBG_CPUID, &cpuid);
2431 if (retval != ERROR_OK) {
2432 LOG_DEBUG("Examine %s failed", "CPUID");
2433 return retval;
2434 }
2435
2436 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2437 armv8->debug_base + CPUDBG_CTYPR, &ctypr);
2438 if (retval != ERROR_OK) {
2439 LOG_DEBUG("Examine %s failed", "CTYPR");
2440 return retval;
2441 }
2442
2443 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2444 armv8->debug_base + CPUDBG_TTYPR, &ttypr);
2445 if (retval != ERROR_OK) {
2446 LOG_DEBUG("Examine %s failed", "TTYPR");
2447 return retval;
2448 }
2449
2450 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2451 armv8->debug_base + ID_AA64PFR0_EL1, &pfr);
2452 if (retval != ERROR_OK) {
2453 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2454 return retval;
2455 }
2456 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2457 armv8->debug_base + ID_AA64DFR0_EL1, &debug);
2458 if (retval != ERROR_OK) {
2459 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2460 return retval;
2461 }
2462
2463 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2464 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2465 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2466 LOG_DEBUG("ID_AA64PFR0_EL1 = 0x%08" PRIx32, pfr);
2467 LOG_DEBUG("ID_AA64DFR0_EL1 = 0x%08" PRIx32, debug);
2468
2469 armv8->arm.core_type = ARM_MODE_MON;
2470 armv8->arm.core_state = ARM_STATE_AARCH64;
2471 retval = aarch64_dpm_setup(aarch64, debug);
2472 if (retval != ERROR_OK)
2473 return retval;
2474
2475 /* Setup Breakpoint Register Pairs */
2476 aarch64->brp_num = ((debug >> 12) & 0x0F) + 1;
2477 aarch64->brp_num_context = ((debug >> 28) & 0x0F) + 1;
2478
2479 /* hack - no context bpt support yet */
2480 aarch64->brp_num_context = 0;
2481
2482 aarch64->brp_num_available = aarch64->brp_num;
2483 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2484 for (i = 0; i < aarch64->brp_num; i++) {
2485 aarch64->brp_list[i].used = 0;
2486 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2487 aarch64->brp_list[i].type = BRP_NORMAL;
2488 else
2489 aarch64->brp_list[i].type = BRP_CONTEXT;
2490 aarch64->brp_list[i].value = 0;
2491 aarch64->brp_list[i].control = 0;
2492 aarch64->brp_list[i].BRPn = i;
2493 }
2494
2495 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2496
2497 target_set_examined(target);
2498 return ERROR_OK;
2499 }
2500
2501 static int aarch64_examine(struct target *target)
2502 {
2503 int retval = ERROR_OK;
2504
2505 /* don't re-probe hardware after each reset */
2506 if (!target_was_examined(target))
2507 retval = aarch64_examine_first(target);
2508
2509 /* Configure core debug access */
2510 if (retval == ERROR_OK)
2511 retval = aarch64_init_debug_access(target);
2512
2513 return retval;
2514 }
2515
2516 /*
2517 * Cortex-A8 target creation and initialization
2518 */
2519
/**
 * init_target hook.  Intentionally a no-op: all real initialization is
 * done in aarch64_examine_first() / aarch64_init_arch_info().
 */
static int aarch64_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
2526
2527 static int aarch64_init_arch_info(struct target *target,
2528 struct aarch64_common *aarch64, struct jtag_tap *tap)
2529 {
2530 struct armv8_common *armv8 = &aarch64->armv8_common;
2531 struct adiv5_dap *dap = armv8->arm.dap;
2532
2533 armv8->arm.dap = dap;
2534
2535 /* Setup struct aarch64_common */
2536 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2537 /* tap has no dap initialized */
2538 if (!tap->dap) {
2539 tap->dap = dap_init();
2540
2541 /* Leave (only) generic DAP stuff for debugport_init() */
2542 tap->dap->tap = tap;
2543 }
2544
2545 armv8->arm.dap = tap->dap;
2546
2547 aarch64->fast_reg_read = 0;
2548
2549 /* register arch-specific functions */
2550 armv8->examine_debug_reason = NULL;
2551
2552 armv8->post_debug_entry = aarch64_post_debug_entry;
2553
2554 armv8->pre_restore_context = NULL;
2555
2556 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2557
2558 /* REVISIT v7a setup should be in a v7a-specific routine */
2559 armv8_init_arch_info(target, armv8);
2560 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2561
2562 return ERROR_OK;
2563 }
2564
2565 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2566 {
2567 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2568
2569 aarch64->armv8_common.is_armv7r = false;
2570
2571 return aarch64_init_arch_info(target, aarch64, target->tap);
2572 }
2573
2574 static int aarch64_mmu(struct target *target, int *enabled)
2575 {
2576 if (target->state != TARGET_HALTED) {
2577 LOG_ERROR("%s: target not halted", __func__);
2578 return ERROR_TARGET_INVALID;
2579 }
2580
2581 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2582 return ERROR_OK;
2583 }
2584
/**
 * Translate a virtual address to a physical address.
 *
 * When the AHB memory AP is selected, the translation is computed via
 * armv8_mmu_translate_va(); otherwise the MMU is enabled and the CPU's
 * own address translation is used via armv8_mmu_translate_va_pa().
 */
static int aarch64_virt2phys(struct target *target, target_addr_t virt,
	target_addr_t *phys)
{
	int retval = ERROR_FAIL;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct adiv5_dap *swjdp = armv8->arm.dap;
	uint8_t apsel = swjdp->apsel;
	if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
		/* NOTE(review): ret is only 32 bits wide, so a physical address
		 * above 4GiB would be truncated here - confirm the result width
		 * of armv8_mmu_translate_va() */
		uint32_t ret;
		retval = armv8_mmu_translate_va(target,
				virt, &ret);
		if (retval != ERROR_OK)
			goto done;
		*phys = ret;
	} else {/* use this method if armv8->memory_ap not selected
		 * mmu must be enable in order to get a correct translation */
		retval = aarch64_mmu_modify(target, 1);
		if (retval != ERROR_OK)
			goto done;
		retval = armv8_mmu_translate_va_pa(target, virt, phys, 1);
	}
done:
	return retval;
}
2609
2610 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2611 {
2612 struct target *target = get_current_target(CMD_CTX);
2613 struct armv8_common *armv8 = target_to_armv8(target);
2614
2615 return armv8_handle_cache_info_command(CMD_CTX,
2616 &armv8->armv8_mmu.armv8_cache);
2617 }
2618
2619
2620 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2621 {
2622 struct target *target = get_current_target(CMD_CTX);
2623 if (!target_was_examined(target)) {
2624 LOG_ERROR("target not examined yet");
2625 return ERROR_FAIL;
2626 }
2627
2628 return aarch64_init_debug_access(target);
2629 }
2630 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2631 {
2632 struct target *target = get_current_target(CMD_CTX);
2633 /* check target is an smp target */
2634 struct target_list *head;
2635 struct target *curr;
2636 head = target->head;
2637 target->smp = 0;
2638 if (head != (struct target_list *)NULL) {
2639 while (head != (struct target_list *)NULL) {
2640 curr = head->target;
2641 curr->smp = 0;
2642 head = head->next;
2643 }
2644 /* fixes the target display to the debugger */
2645 target->gdb_service->target = target;
2646 }
2647 return ERROR_OK;
2648 }
2649
2650 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2651 {
2652 struct target *target = get_current_target(CMD_CTX);
2653 struct target_list *head;
2654 struct target *curr;
2655 head = target->head;
2656 if (head != (struct target_list *)NULL) {
2657 target->smp = 1;
2658 while (head != (struct target_list *)NULL) {
2659 curr = head->target;
2660 curr->smp = 1;
2661 head = head->next;
2662 }
2663 }
2664 return ERROR_OK;
2665 }
2666
/**
 * "smp_gdb" command: display - and with one argument, change - which
 * core of an SMP group is presented to gdb (gdb_service->core[1]).
 * Does nothing if the target has no SMP list.
 */
COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
{
	struct target *target = get_current_target(CMD_CTX);
	int retval = ERROR_OK;
	struct target_list *head;
	head = target->head;
	if (head != (struct target_list *)NULL) {
		if (CMD_ARGC == 1) {
			int coreid = 0;
			COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
			/* NOTE(review): COMMAND_PARSE_NUMBER presumably returns
			 * from this handler itself on a parse failure, so retval
			 * is still ERROR_OK here and this check looks dead -
			 * confirm against the macro definition */
			if (ERROR_OK != retval)
				return retval;
			/* NOTE(review): gdb_service is dereferenced without a
			 * NULL check - confirm it is always set for SMP targets */
			target->gdb_service->core[1] = coreid;

		}
		command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
			, target->gdb_service->core[1]);
	}
	return ERROR_OK;
}
2687
2688 static const struct command_registration aarch64_exec_command_handlers[] = {
2689 {
2690 .name = "cache_info",
2691 .handler = aarch64_handle_cache_info_command,
2692 .mode = COMMAND_EXEC,
2693 .help = "display information about target caches",
2694 .usage = "",
2695 },
2696 {
2697 .name = "dbginit",
2698 .handler = aarch64_handle_dbginit_command,
2699 .mode = COMMAND_EXEC,
2700 .help = "Initialize core debug",
2701 .usage = "",
2702 },
2703 { .name = "smp_off",
2704 .handler = aarch64_handle_smp_off_command,
2705 .mode = COMMAND_EXEC,
2706 .help = "Stop smp handling",
2707 .usage = "",
2708 },
2709 {
2710 .name = "smp_on",
2711 .handler = aarch64_handle_smp_on_command,
2712 .mode = COMMAND_EXEC,
2713 .help = "Restart smp handling",
2714 .usage = "",
2715 },
2716 {
2717 .name = "smp_gdb",
2718 .handler = aarch64_handle_smp_gdb_command,
2719 .mode = COMMAND_EXEC,
2720 .help = "display/fix current core played to gdb",
2721 .usage = "",
2722 },
2723
2724
2725 COMMAND_REGISTRATION_DONE
2726 };
/* Top-level command registration: generic ARM and ARMv8 command chains
 * plus the aarch64-specific commands above. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv8_command_handlers,
	},
	{
		/* NOTE(review): the group is named "cortex_a" although this is
		 * the aarch64 target - presumably carried over from cortex_a.c;
		 * renaming would break existing user scripts, so left as-is */
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2743
/* Target type vtable binding the aarch64 implementation into the OpenOCD
 * target framework. Watchpoint support is not implemented (NULL hooks). */
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	/* checksum/blank-check reuse the generic ARM algorithms */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	/* watchpoints not supported yet */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)